source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 70861

Last change on this file since 70861 was 70861, checked in by vboxsync on 2018-02-05

VMM/IEM: ifdef space indents.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 639.0 KB
1/* $Id: IEMAll.cpp 70861 2018-02-05 10:49:59Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
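/* Editor's note: a minimal, hypothetical sketch of how the log levels listed in
 * the @page comment above are typically exercised; the messages and the
 * variables uSel, uRip, GCPtrMem and cbMem are made up for illustration and do
 * not appear in the code below. */
#if 0 /* illustration only */
    LogFlow(("IEMExecOne: enter\n"));                             /* Flow   : basic enter/exit info   */
    Log(("iemExample: raising #GP(0)\n"));                        /* Level 1: exceptions, interrupts  */
    Log4(("decode - %04x:%08RX64: xor eax, eax\n", uSel, uRip));  /* Level 4: mnemonics w/ EIP        */
    Log8(("IEM WR %RGv LB %#x\n", GCPtrMem, cbMem));              /* Level 8: memory writes           */
#endif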
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#ifdef VBOX_WITH_NESTED_HWVIRT
105# include <VBox/vmm/em.h>
106# include <VBox/vmm/hm_svm.h>
107#endif
108#include <VBox/vmm/tm.h>
109#include <VBox/vmm/dbgf.h>
110#include <VBox/vmm/dbgftrace.h>
111#ifdef VBOX_WITH_RAW_MODE_NOT_R0
112# include <VBox/vmm/patm.h>
113# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
114# include <VBox/vmm/csam.h>
115# endif
116#endif
117#include "IEMInternal.h"
118#ifdef IEM_VERIFICATION_MODE_FULL
119# include <VBox/vmm/rem.h>
120# include <VBox/vmm/mm.h>
121#endif
122#include <VBox/vmm/vm.h>
123#include <VBox/log.h>
124#include <VBox/err.h>
125#include <VBox/param.h>
126#include <VBox/dis.h>
127#include <VBox/disopcode.h>
128#include <iprt/assert.h>
129#include <iprt/string.h>
130#include <iprt/x86.h>
131
132
133/*********************************************************************************************************************************
134* Structures and Typedefs *
135*********************************************************************************************************************************/
136/** @typedef PFNIEMOP
137 * Pointer to an opcode decoder function.
138 */
139
140/** @def FNIEMOP_DEF
141 * Define an opcode decoder function.
142 *
143 * We're using macros for this so that adding and removing parameters as well as
144 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL
145 *
146 * @param a_Name The function name.
147 */
148
149/** @typedef PFNIEMOPRM
150 * Pointer to an opcode decoder function with RM byte.
151 */
152
153/** @def FNIEMOPRM_DEF
154 * Define an opcode decoder function with RM byte.
155 *
156 * We're using macros for this so that adding and removing parameters as well as
157 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1
158 *
159 * @param a_Name The function name.
160 */
161
162#if defined(__GNUC__) && defined(RT_ARCH_X86)
163typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
164typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
165# define FNIEMOP_DEF(a_Name) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
167# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
168 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
169# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
170 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
171
172#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
173typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
174typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
175# define FNIEMOP_DEF(a_Name) \
176 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
177# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
178 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
179# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
180 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
181
182#elif defined(__GNUC__)
183typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
184typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
185# define FNIEMOP_DEF(a_Name) \
186 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
187# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
188 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
189# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
190 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
191
192#else
193typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
194typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
195# define FNIEMOP_DEF(a_Name) \
196 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
197# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
198 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
199# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
200 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
201
202#endif
203#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
204
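/* Editor's note: a hedged, made-up illustration of how the FNIEMOP_* macros
 * above are meant to be used; the real opcode decoders live in the included
 * IEMAllInstructions*.cpp.h files and differ in detail.  The names iemOp_example,
 * iemOp_example_rm and bOpcode are invented for this sketch. */
#if 0 /* illustration only */
FNIEMOP_DEF(iemOp_example)                   /* plain decoder; pVCpu is the implicit parameter */
{
    /* ... decode operands and dispatch the instruction here ... */
    return VINF_SUCCESS;
}

FNIEMOPRM_DEF(iemOp_example_rm)              /* decoder that also receives the ModR/M byte (bRm) */
{
    RT_NOREF(bRm);
    return VINF_SUCCESS;
}

    /* Dispatch from a decoder table, e.g. the one-byte opcode map declared further down: */
    VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[bOpcode]);
#endif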
205
206/**
207 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
208 */
209typedef union IEMSELDESC
210{
211 /** The legacy view. */
212 X86DESC Legacy;
213 /** The long mode view. */
214 X86DESC64 Long;
215} IEMSELDESC;
216/** Pointer to a selector descriptor table entry. */
217typedef IEMSELDESC *PIEMSELDESC;
218
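/* Editor's note: a hedged sketch of how a descriptor is typically fetched and
 * inspected through this union; the helper names follow the forward
 * declarations further down (iemMemFetchSelDesc,
 * iemRaiseSelectorNotPresentBySelector), while uSel and the surrounding logic
 * are made up for illustration. */
#if 0 /* illustration only */
    IEMSELDESC   Desc;
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    if (!Desc.Legacy.Gen.u1Present)          /* e.g. check the present bit via the legacy view */
        return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
#endif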
219/**
220 * CPU exception classes.
221 */
222typedef enum IEMXCPTCLASS
223{
224 IEMXCPTCLASS_BENIGN,
225 IEMXCPTCLASS_CONTRIBUTORY,
226 IEMXCPTCLASS_PAGE_FAULT,
227 IEMXCPTCLASS_DOUBLE_FAULT
228} IEMXCPTCLASS;
229
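/* Editor's note (assumption based on the Intel/AMD manuals, not on this file):
 * these classes feed the usual double-fault rules -- a contributory exception
 * raised while delivering another contributory one, or a page fault raised
 * while delivering a page fault or contributory exception, escalates to \#DF,
 * and a further fault while delivering \#DF ends in a triple fault / shutdown. */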
230
231/*********************************************************************************************************************************
232* Defined Constants And Macros *
233*********************************************************************************************************************************/
234/** @def IEM_WITH_SETJMP
235 * Enables alternative status code handling using setjmps.
236 *
237 * This adds a bit of expense via the setjmp() call since it saves all the
238 * non-volatile registers. However, it eliminates return code checks and allows
239 * for more optimal return value passing (return regs instead of stack buffer).
240 */
241#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
242# define IEM_WITH_SETJMP
243#endif
244
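/* Editor's note: a simplified sketch (not actual IEM code) of the two
 * status-handling styles this option switches between.  With IEM_WITH_SETJMP
 * defined, the raise/fetch helpers get *Jmp variants (e.g. iemRaisePageFaultJmp,
 * forward declared further down) that longjmp out on faults, so callers no
 * longer check a VBOXSTRICTRC after every fetch.  The iemMemFetchDataU32Jmp
 * name below is assumed from that pattern; iSegReg and GCPtrMem are placeholders. */
#if 0 /* illustration only */
    /* Without IEM_WITH_SETJMP: every helper returns a status that must be checked. */
    uint32_t     u32Value;
    VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &u32Value, iSegReg, GCPtrMem);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* With IEM_WITH_SETJMP: the value comes back directly; faults longjmp to the
       setjmp established by the outer execution loop. */
    uint32_t u32Value2 = iemMemFetchDataU32Jmp(pVCpu, iSegReg, GCPtrMem);
#endif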
245/** Temporary hack to disable the double execution. Will be removed in favor
246 * of a dedicated execution mode in EM. */
247//#define IEM_VERIFICATION_MODE_NO_REM
248
249/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
250 * due to GCC lacking knowledge about the value range of a switch. */
251#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
252
253/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
254#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
255
256/**
257 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
258 * occasion.
259 */
260#ifdef LOG_ENABLED
261# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
262 do { \
263 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
264 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
265 } while (0)
266#else
267# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
268 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
269#endif
270
271/**
272 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
273 * occasion using the supplied logger statement.
274 *
275 * @param a_LoggerArgs What to log on failure.
276 */
277#ifdef LOG_ENABLED
278# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
279 do { \
280 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
281 /*LogFunc(a_LoggerArgs);*/ \
282 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
283 } while (0)
284#else
285# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
286 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
287#endif
288
289/**
290 * Call an opcode decoder function.
291 *
292 * We're using macros for this so that adding and removing parameters can be
293 * done as we please. See FNIEMOP_DEF.
294 */
295#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
296
297/**
298 * Call a common opcode decoder function taking one extra argument.
299 *
300 * We're using macros for this so that adding and removing parameters can be
301 * done as we please. See FNIEMOP_DEF_1.
302 */
303#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
304
305/**
306 * Call a common opcode decoder function taking two extra arguments.
307 *
308 * We're using macros for this so that adding and removing parameters can be
309 * done as we please. See FNIEMOP_DEF_2.
310 */
311#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
312
313/**
314 * Check if we're currently executing in real or virtual 8086 mode.
315 *
316 * @returns @c true if it is, @c false if not.
317 * @param a_pVCpu The IEM state of the current CPU.
318 */
319#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
320
321/**
322 * Check if we're currently executing in virtual 8086 mode.
323 *
324 * @returns @c true if it is, @c false if not.
325 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
326 */
327#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
328
329/**
330 * Check if we're currently executing in long mode.
331 *
332 * @returns @c true if it is, @c false if not.
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
336
337/**
338 * Check if we're currently executing in real mode.
339 *
340 * @returns @c true if it is, @c false if not.
341 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
342 */
343#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
344
345/**
346 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
347 * @returns PCCPUMFEATURES
348 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
349 */
350#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
351
352/**
353 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
354 * @returns PCCPUMFEATURES
355 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
356 */
357#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
358
359/**
360 * Evaluates to true if we're presenting an Intel CPU to the guest.
361 */
362#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
363
364/**
365 * Evaluates to true if we're presenting an AMD CPU to the guest.
366 */
367#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
368
369/**
370 * Check if the address is canonical.
371 */
372#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
373
374/**
375 * Gets the effective VEX.VVVV value.
376 *
377 * The 4th bit is ignored when not in 64-bit code.
378 * @returns effective V-register value.
379 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
380 */
381#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
382 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
383
384/** @def IEM_USE_UNALIGNED_DATA_ACCESS
385 * Use unaligned accesses instead of elaborate byte assembly. */
386#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
387# define IEM_USE_UNALIGNED_DATA_ACCESS
388#endif
389
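/* Editor's note: a tiny sketch of the trade-off this option controls;
 * illustrative only, the real uses are in the opcode/memory fetch helpers
 * further down, and pbBuf/offBuf are placeholders. */
#if 0 /* illustration only */
    /* With IEM_USE_UNALIGNED_DATA_ACCESS: rely on x86 tolerating unaligned loads. */
    uint32_t u32  = *(uint32_t const *)&pbBuf[offBuf];
    /* Without it: assemble the value byte by byte to stay alignment-safe. */
    uint32_t u32b = RT_MAKE_U32_FROM_U8(pbBuf[offBuf], pbBuf[offBuf + 1], pbBuf[offBuf + 2], pbBuf[offBuf + 3]);
#endif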
390#ifdef VBOX_WITH_NESTED_HWVIRT
391/**
392 * Check the common SVM instruction preconditions.
393 */
394# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
395 do { \
396 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
397 { \
398 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
399 return iemRaiseUndefinedOpcode(pVCpu); \
400 } \
401 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
402 { \
403 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
404 return iemRaiseUndefinedOpcode(pVCpu); \
405 } \
406 if (pVCpu->iem.s.uCpl != 0) \
407 { \
408 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
409 return iemRaiseGeneralProtectionFault0(pVCpu); \
410 } \
411 } while (0)
412
413/**
414 * Check if SVM is enabled.
415 */
416# define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
417
418/**
419 * Check if an SVM control/instruction intercept is set.
420 */
421# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
422
423/**
424 * Check if an SVM read CRx intercept is set.
425 */
426# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
427
428/**
429 * Check if an SVM write CRx intercept is set.
430 */
431# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
432
433/**
434 * Check if an SVM read DRx intercept is set.
435 */
436# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
437
438/**
439 * Check if an SVM write DRx intercept is set.
440 */
441# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
442
443/**
444 * Check if an SVM exception intercept is set.
445 */
446# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
447
448/**
449 * Invokes the SVM \#VMEXIT handler for the nested-guest.
450 */
451# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
452 do \
453 { \
454 return iemSvmVmexit((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); \
455 } while (0)
456
457/**
458 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
459 * corresponding decode assist information.
460 */
461# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
462 do \
463 { \
464 uint64_t uExitInfo1; \
465 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
466 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
467 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
468 else \
469 uExitInfo1 = 0; \
470 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
471 } while (0)
472
473#else
474# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) do { } while (0)
475# define IEM_IS_SVM_ENABLED(a_pVCpu) (false)
476# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
477# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
478# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
479# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
480# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
481# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
482# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
483# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
484
485#endif /* VBOX_WITH_NESTED_HWVIRT */
486
487
488/*********************************************************************************************************************************
489* Global Variables *
490*********************************************************************************************************************************/
491extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
492
493
494/** Function table for the ADD instruction. */
495IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
496{
497 iemAImpl_add_u8, iemAImpl_add_u8_locked,
498 iemAImpl_add_u16, iemAImpl_add_u16_locked,
499 iemAImpl_add_u32, iemAImpl_add_u32_locked,
500 iemAImpl_add_u64, iemAImpl_add_u64_locked
501};
502
503/** Function table for the ADC instruction. */
504IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
505{
506 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
507 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
508 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
509 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
510};
511
512/** Function table for the SUB instruction. */
513IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
514{
515 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
516 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
517 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
518 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
519};
520
521/** Function table for the SBB instruction. */
522IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
523{
524 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
525 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
526 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
527 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
528};
529
530/** Function table for the OR instruction. */
531IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
532{
533 iemAImpl_or_u8, iemAImpl_or_u8_locked,
534 iemAImpl_or_u16, iemAImpl_or_u16_locked,
535 iemAImpl_or_u32, iemAImpl_or_u32_locked,
536 iemAImpl_or_u64, iemAImpl_or_u64_locked
537};
538
539/** Function table for the XOR instruction. */
540IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
541{
542 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
543 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
544 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
545 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
546};
547
548/** Function table for the AND instruction. */
549IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
550{
551 iemAImpl_and_u8, iemAImpl_and_u8_locked,
552 iemAImpl_and_u16, iemAImpl_and_u16_locked,
553 iemAImpl_and_u32, iemAImpl_and_u32_locked,
554 iemAImpl_and_u64, iemAImpl_and_u64_locked
555};
556
557/** Function table for the CMP instruction.
558 * @remarks Making operand order ASSUMPTIONS.
559 */
560IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
561{
562 iemAImpl_cmp_u8, NULL,
563 iemAImpl_cmp_u16, NULL,
564 iemAImpl_cmp_u32, NULL,
565 iemAImpl_cmp_u64, NULL
566};
567
568/** Function table for the TEST instruction.
569 * @remarks Making operand order ASSUMPTIONS.
570 */
571IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
572{
573 iemAImpl_test_u8, NULL,
574 iemAImpl_test_u16, NULL,
575 iemAImpl_test_u32, NULL,
576 iemAImpl_test_u64, NULL
577};
578
579/** Function table for the BT instruction. */
580IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
581{
582 NULL, NULL,
583 iemAImpl_bt_u16, NULL,
584 iemAImpl_bt_u32, NULL,
585 iemAImpl_bt_u64, NULL
586};
587
588/** Function table for the BTC instruction. */
589IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
590{
591 NULL, NULL,
592 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
593 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
594 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
595};
596
597/** Function table for the BTR instruction. */
598IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
599{
600 NULL, NULL,
601 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
602 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
603 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
604};
605
606/** Function table for the BTS instruction. */
607IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
608{
609 NULL, NULL,
610 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
611 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
612 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
613};
614
615/** Function table for the BSF instruction. */
616IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
617{
618 NULL, NULL,
619 iemAImpl_bsf_u16, NULL,
620 iemAImpl_bsf_u32, NULL,
621 iemAImpl_bsf_u64, NULL
622};
623
624/** Function table for the BSR instruction. */
625IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
626{
627 NULL, NULL,
628 iemAImpl_bsr_u16, NULL,
629 iemAImpl_bsr_u32, NULL,
630 iemAImpl_bsr_u64, NULL
631};
632
633/** Function table for the IMUL instruction. */
634IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
635{
636 NULL, NULL,
637 iemAImpl_imul_two_u16, NULL,
638 iemAImpl_imul_two_u32, NULL,
639 iemAImpl_imul_two_u64, NULL
640};
641
642/** Group 1 /r lookup table. */
643IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
644{
645 &g_iemAImpl_add,
646 &g_iemAImpl_or,
647 &g_iemAImpl_adc,
648 &g_iemAImpl_sbb,
649 &g_iemAImpl_and,
650 &g_iemAImpl_sub,
651 &g_iemAImpl_xor,
652 &g_iemAImpl_cmp
653};
654
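/* Editor's note: the table order above follows the group-1 /r encoding
 * (ADD=/0, OR=/1, ADC=/2, SBB=/3, AND=/4, SUB=/5, XOR=/6, CMP=/7), so a decoder
 * can pick the implementation straight from the ModR/M reg field.  Hedged
 * illustration only; the actual lookup happens in the opcode decoder files. */
#if 0 /* illustration only */
    PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> 3) & 7]; /* bits 5:3 = reg/opcode extension */
#endif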
655/** Function table for the INC instruction. */
656IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
657{
658 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
659 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
660 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
661 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
662};
663
664/** Function table for the DEC instruction. */
665IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
666{
667 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
668 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
669 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
670 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
671};
672
673/** Function table for the NEG instruction. */
674IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
675{
676 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
677 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
678 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
679 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
680};
681
682/** Function table for the NOT instruction. */
683IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
684{
685 iemAImpl_not_u8, iemAImpl_not_u8_locked,
686 iemAImpl_not_u16, iemAImpl_not_u16_locked,
687 iemAImpl_not_u32, iemAImpl_not_u32_locked,
688 iemAImpl_not_u64, iemAImpl_not_u64_locked
689};
690
691
692/** Function table for the ROL instruction. */
693IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
694{
695 iemAImpl_rol_u8,
696 iemAImpl_rol_u16,
697 iemAImpl_rol_u32,
698 iemAImpl_rol_u64
699};
700
701/** Function table for the ROR instruction. */
702IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
703{
704 iemAImpl_ror_u8,
705 iemAImpl_ror_u16,
706 iemAImpl_ror_u32,
707 iemAImpl_ror_u64
708};
709
710/** Function table for the RCL instruction. */
711IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
712{
713 iemAImpl_rcl_u8,
714 iemAImpl_rcl_u16,
715 iemAImpl_rcl_u32,
716 iemAImpl_rcl_u64
717};
718
719/** Function table for the RCR instruction. */
720IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
721{
722 iemAImpl_rcr_u8,
723 iemAImpl_rcr_u16,
724 iemAImpl_rcr_u32,
725 iemAImpl_rcr_u64
726};
727
728/** Function table for the SHL instruction. */
729IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
730{
731 iemAImpl_shl_u8,
732 iemAImpl_shl_u16,
733 iemAImpl_shl_u32,
734 iemAImpl_shl_u64
735};
736
737/** Function table for the SHR instruction. */
738IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
739{
740 iemAImpl_shr_u8,
741 iemAImpl_shr_u16,
742 iemAImpl_shr_u32,
743 iemAImpl_shr_u64
744};
745
746/** Function table for the SAR instruction. */
747IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
748{
749 iemAImpl_sar_u8,
750 iemAImpl_sar_u16,
751 iemAImpl_sar_u32,
752 iemAImpl_sar_u64
753};
754
755
756/** Function table for the MUL instruction. */
757IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
758{
759 iemAImpl_mul_u8,
760 iemAImpl_mul_u16,
761 iemAImpl_mul_u32,
762 iemAImpl_mul_u64
763};
764
765/** Function table for the IMUL instruction working implicitly on rAX. */
766IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
767{
768 iemAImpl_imul_u8,
769 iemAImpl_imul_u16,
770 iemAImpl_imul_u32,
771 iemAImpl_imul_u64
772};
773
774/** Function table for the DIV instruction. */
775IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
776{
777 iemAImpl_div_u8,
778 iemAImpl_div_u16,
779 iemAImpl_div_u32,
780 iemAImpl_div_u64
781};
782
783/** Function table for the IDIV instruction. */
784IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
785{
786 iemAImpl_idiv_u8,
787 iemAImpl_idiv_u16,
788 iemAImpl_idiv_u32,
789 iemAImpl_idiv_u64
790};
791
792/** Function table for the SHLD instruction */
793IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
794{
795 iemAImpl_shld_u16,
796 iemAImpl_shld_u32,
797 iemAImpl_shld_u64,
798};
799
800/** Function table for the SHRD instruction */
801IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
802{
803 iemAImpl_shrd_u16,
804 iemAImpl_shrd_u32,
805 iemAImpl_shrd_u64,
806};
807
808
809/** Function table for the PUNPCKLBW instruction */
810IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
811/** Function table for the PUNPCKLWD instruction */
812IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
813/** Function table for the PUNPCKLDQ instruction */
814IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
815/** Function table for the PUNPCKLQDQ instruction */
816IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
817
818/** Function table for the PUNPCKHBW instruction */
819IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
820/** Function table for the PUNPCKHBD instruction */
821IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
822/** Function table for the PUNPCKHDQ instruction */
823IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
824/** Function table for the PUNPCKHQDQ instruction */
825IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
826
827/** Function table for the PXOR instruction */
828IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
829/** Function table for the PCMPEQB instruction */
830IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
831/** Function table for the PCMPEQW instruction */
832IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
833/** Function table for the PCMPEQD instruction */
834IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
835
836
837#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
838/** What IEM just wrote. */
839uint8_t g_abIemWrote[256];
840/** How much IEM just wrote. */
841size_t g_cbIemWrote;
842#endif
843
844
845/*********************************************************************************************************************************
846* Internal Functions *
847*********************************************************************************************************************************/
848IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
849IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
850IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
851IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
852/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
853IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
854IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
855IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
856IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
857IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
858IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
859IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
860IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
861IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
862IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
863IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
864IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
865#ifdef IEM_WITH_SETJMP
866DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
867DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
868DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
869DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
870DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
871#endif
872
873IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
874IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
875IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
876IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
877IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
878IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
879IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
880IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
881IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
882IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
883IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
884IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
885IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
886IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
887IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
888IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
889IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
890
891#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
892IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
893#endif
894IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
895IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
896
897#ifdef VBOX_WITH_NESTED_HWVIRT
898IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uExitCode, uint64_t uExitInfo1,
899 uint64_t uExitInfo2);
900IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t u8Vector, uint32_t fFlags,
901 uint32_t uErr, uint64_t uCr2);
902#endif
903
904/**
905 * Sets the pass up status.
906 *
907 * @returns VINF_SUCCESS.
908 * @param pVCpu The cross context virtual CPU structure of the
909 * calling thread.
910 * @param rcPassUp The pass up status. Must be informational.
911 * VINF_SUCCESS is not allowed.
912 */
913IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
914{
915 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
916
917 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
918 if (rcOldPassUp == VINF_SUCCESS)
919 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
920 /* If both are EM scheduling codes, use EM priority rules. */
921 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
922 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
923 {
924 if (rcPassUp < rcOldPassUp)
925 {
926 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
927 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
928 }
929 else
930 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
931 }
932 /* Override EM scheduling with specific status code. */
933 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
934 {
935 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
936 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
937 }
938 /* Don't override specific status code, first come first served. */
939 else
940 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
941 return VINF_SUCCESS;
942}
943
944
945/**
946 * Calculates the CPU mode.
947 *
948 * This is mainly for updating IEMCPU::enmCpuMode.
949 *
950 * @returns CPU mode.
951 * @param pCtx The register context for the CPU.
952 */
953DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
954{
955 if (CPUMIsGuestIn64BitCodeEx(pCtx))
956 return IEMMODE_64BIT;
957 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
958 return IEMMODE_32BIT;
959 return IEMMODE_16BIT;
960}
961
962
963/**
964 * Initializes the execution state.
965 *
966 * @param pVCpu The cross context virtual CPU structure of the
967 * calling thread.
968 * @param fBypassHandlers Whether to bypass access handlers.
969 *
970 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
971 * side-effects in strict builds.
972 */
973DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
974{
975 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
976
977 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
978
979#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
980 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
981 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
982 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
983 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
984 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
985 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
986 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
987 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
988#endif
989
990#ifdef VBOX_WITH_RAW_MODE_NOT_R0
991 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
992#endif
993 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
994 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
995#ifdef VBOX_STRICT
996 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
997 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
998 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
999 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1000 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1001 pVCpu->iem.s.uRexReg = 127;
1002 pVCpu->iem.s.uRexB = 127;
1003 pVCpu->iem.s.uRexIndex = 127;
1004 pVCpu->iem.s.iEffSeg = 127;
1005 pVCpu->iem.s.idxPrefix = 127;
1006 pVCpu->iem.s.uVex3rdReg = 127;
1007 pVCpu->iem.s.uVexLength = 127;
1008 pVCpu->iem.s.fEvexStuff = 127;
1009 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1010# ifdef IEM_WITH_CODE_TLB
1011 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1012 pVCpu->iem.s.pbInstrBuf = NULL;
1013 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1014 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1015 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1016 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1017# else
1018 pVCpu->iem.s.offOpcode = 127;
1019 pVCpu->iem.s.cbOpcode = 127;
1020# endif
1021#endif
1022
1023 pVCpu->iem.s.cActiveMappings = 0;
1024 pVCpu->iem.s.iNextMapping = 0;
1025 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1026 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1027#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1028 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1029 && pCtx->cs.u64Base == 0
1030 && pCtx->cs.u32Limit == UINT32_MAX
1031 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1032 if (!pVCpu->iem.s.fInPatchCode)
1033 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1034#endif
1035
1036#ifdef IEM_VERIFICATION_MODE_FULL
1037 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
1038 pVCpu->iem.s.fNoRem = true;
1039#endif
1040}
1041
1042#ifdef VBOX_WITH_NESTED_HWVIRT
1043/**
1044 * Performs a minimal reinitialization of the execution state.
1045 *
1046 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1047 * 'world-switch' types operations on the CPU. Currently only nested
1048 * hardware-virtualization uses it.
1049 *
1050 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1051 */
1052IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1053{
1054 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1055 IEMMODE const enmMode = iemCalcCpuMode(pCtx);
1056 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1057
1058 pVCpu->iem.s.uCpl = uCpl;
1059 pVCpu->iem.s.enmCpuMode = enmMode;
1060 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1061 pVCpu->iem.s.enmEffAddrMode = enmMode;
1062 if (enmMode != IEMMODE_64BIT)
1063 {
1064 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1065 pVCpu->iem.s.enmEffOpSize = enmMode;
1066 }
1067 else
1068 {
1069 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1070 pVCpu->iem.s.enmEffOpSize = enmMode;
1071 }
1072 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1073#ifndef IEM_WITH_CODE_TLB
1074 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1075 pVCpu->iem.s.offOpcode = 0;
1076 pVCpu->iem.s.cbOpcode = 0;
1077#endif
1078}
1079#endif
1080
1081/**
1082 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1083 *
1084 * @param pVCpu The cross context virtual CPU structure of the
1085 * calling thread.
1086 */
1087DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1088{
1089 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1090#ifdef IEM_VERIFICATION_MODE_FULL
1091 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
1092#endif
1093#ifdef VBOX_STRICT
1094# ifdef IEM_WITH_CODE_TLB
1095 NOREF(pVCpu);
1096# else
1097 pVCpu->iem.s.cbOpcode = 0;
1098# endif
1099#else
1100 NOREF(pVCpu);
1101#endif
1102}
1103
1104
1105/**
1106 * Initializes the decoder state.
1107 *
1108 * iemReInitDecoder is mostly a copy of this function.
1109 *
1110 * @param pVCpu The cross context virtual CPU structure of the
1111 * calling thread.
1112 * @param fBypassHandlers Whether to bypass access handlers.
1113 */
1114DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1115{
1116 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1117
1118 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1119
1120#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1121 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1122 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1123 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1124 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1125 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1126 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1127 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1128 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1129#endif
1130
1131#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1132 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1133#endif
1134 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1135#ifdef IEM_VERIFICATION_MODE_FULL
1136 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1137 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1138#endif
1139 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1140 pVCpu->iem.s.enmCpuMode = enmMode;
1141 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1142 pVCpu->iem.s.enmEffAddrMode = enmMode;
1143 if (enmMode != IEMMODE_64BIT)
1144 {
1145 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1146 pVCpu->iem.s.enmEffOpSize = enmMode;
1147 }
1148 else
1149 {
1150 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1151 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1152 }
1153 pVCpu->iem.s.fPrefixes = 0;
1154 pVCpu->iem.s.uRexReg = 0;
1155 pVCpu->iem.s.uRexB = 0;
1156 pVCpu->iem.s.uRexIndex = 0;
1157 pVCpu->iem.s.idxPrefix = 0;
1158 pVCpu->iem.s.uVex3rdReg = 0;
1159 pVCpu->iem.s.uVexLength = 0;
1160 pVCpu->iem.s.fEvexStuff = 0;
1161 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1162#ifdef IEM_WITH_CODE_TLB
1163 pVCpu->iem.s.pbInstrBuf = NULL;
1164 pVCpu->iem.s.offInstrNextByte = 0;
1165 pVCpu->iem.s.offCurInstrStart = 0;
1166# ifdef VBOX_STRICT
1167 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1168 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1169 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1170# endif
1171#else
1172 pVCpu->iem.s.offOpcode = 0;
1173 pVCpu->iem.s.cbOpcode = 0;
1174#endif
1175 pVCpu->iem.s.cActiveMappings = 0;
1176 pVCpu->iem.s.iNextMapping = 0;
1177 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1178 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1179#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1180 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1181 && pCtx->cs.u64Base == 0
1182 && pCtx->cs.u32Limit == UINT32_MAX
1183 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1184 if (!pVCpu->iem.s.fInPatchCode)
1185 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1186#endif
1187
1188#ifdef DBGFTRACE_ENABLED
1189 switch (enmMode)
1190 {
1191 case IEMMODE_64BIT:
1192 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1193 break;
1194 case IEMMODE_32BIT:
1195 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1196 break;
1197 case IEMMODE_16BIT:
1198 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1199 break;
1200 }
1201#endif
1202}
1203
1204
1205/**
1206 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1207 *
1208 * This is mostly a copy of iemInitDecoder.
1209 *
1210 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1211 */
1212DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1213{
1214 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1215
1216 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1217
1218#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1219 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1220 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1221 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1222 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1223 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1224 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1225 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1226 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1227#endif
1228
1229 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1230#ifdef IEM_VERIFICATION_MODE_FULL
1231 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1232 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1233#endif
1234 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1235 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1236 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1237 pVCpu->iem.s.enmEffAddrMode = enmMode;
1238 if (enmMode != IEMMODE_64BIT)
1239 {
1240 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1241 pVCpu->iem.s.enmEffOpSize = enmMode;
1242 }
1243 else
1244 {
1245 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1246 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1247 }
1248 pVCpu->iem.s.fPrefixes = 0;
1249 pVCpu->iem.s.uRexReg = 0;
1250 pVCpu->iem.s.uRexB = 0;
1251 pVCpu->iem.s.uRexIndex = 0;
1252 pVCpu->iem.s.idxPrefix = 0;
1253 pVCpu->iem.s.uVex3rdReg = 0;
1254 pVCpu->iem.s.uVexLength = 0;
1255 pVCpu->iem.s.fEvexStuff = 0;
1256 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1257#ifdef IEM_WITH_CODE_TLB
1258 if (pVCpu->iem.s.pbInstrBuf)
1259 {
1260 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1261 - pVCpu->iem.s.uInstrBufPc;
1262 if (off < pVCpu->iem.s.cbInstrBufTotal)
1263 {
1264 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1265 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1266 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1267 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1268 else
1269 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1270 }
1271 else
1272 {
1273 pVCpu->iem.s.pbInstrBuf = NULL;
1274 pVCpu->iem.s.offInstrNextByte = 0;
1275 pVCpu->iem.s.offCurInstrStart = 0;
1276 pVCpu->iem.s.cbInstrBuf = 0;
1277 pVCpu->iem.s.cbInstrBufTotal = 0;
1278 }
1279 }
1280 else
1281 {
1282 pVCpu->iem.s.offInstrNextByte = 0;
1283 pVCpu->iem.s.offCurInstrStart = 0;
1284 pVCpu->iem.s.cbInstrBuf = 0;
1285 pVCpu->iem.s.cbInstrBufTotal = 0;
1286 }
1287#else
1288 pVCpu->iem.s.cbOpcode = 0;
1289 pVCpu->iem.s.offOpcode = 0;
1290#endif
1291 Assert(pVCpu->iem.s.cActiveMappings == 0);
1292 pVCpu->iem.s.iNextMapping = 0;
1293 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1294 Assert(pVCpu->iem.s.fBypassHandlers == false);
1295#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1296 if (!pVCpu->iem.s.fInPatchCode)
1297 { /* likely */ }
1298 else
1299 {
1300 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1301 && pCtx->cs.u64Base == 0
1302 && pCtx->cs.u32Limit == UINT32_MAX
1303 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1304 if (!pVCpu->iem.s.fInPatchCode)
1305 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1306 }
1307#endif
1308
1309#ifdef DBGFTRACE_ENABLED
1310 switch (enmMode)
1311 {
1312 case IEMMODE_64BIT:
1313 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1314 break;
1315 case IEMMODE_32BIT:
1316 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1317 break;
1318 case IEMMODE_16BIT:
1319 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1320 break;
1321 }
1322#endif
1323}
1324
1325
1326
1327/**
1328 * Prefetch opcodes the first time when starting to execute.
1329 *
1330 * @returns Strict VBox status code.
1331 * @param pVCpu The cross context virtual CPU structure of the
1332 * calling thread.
1333 * @param fBypassHandlers Whether to bypass access handlers.
1334 */
1335IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1336{
1337#ifdef IEM_VERIFICATION_MODE_FULL
1338 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1339#endif
1340 iemInitDecoder(pVCpu, fBypassHandlers);
1341
1342#ifdef IEM_WITH_CODE_TLB
1343 /** @todo Do ITLB lookup here. */
1344
1345#else /* !IEM_WITH_CODE_TLB */
1346
1347 /*
1348 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1349 *
1350 * First translate CS:rIP to a physical address.
1351 */
1352 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1353 uint32_t cbToTryRead;
1354 RTGCPTR GCPtrPC;
1355 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1356 {
1357 cbToTryRead = PAGE_SIZE;
1358 GCPtrPC = pCtx->rip;
1359 if (IEM_IS_CANONICAL(GCPtrPC))
1360 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1361 else
1362 return iemRaiseGeneralProtectionFault0(pVCpu);
1363 }
1364 else
1365 {
1366 uint32_t GCPtrPC32 = pCtx->eip;
1367 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1368 if (GCPtrPC32 <= pCtx->cs.u32Limit)
1369 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1370 else
1371 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1372 if (cbToTryRead) { /* likely */ }
1373 else /* overflowed */
1374 {
1375 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1376 cbToTryRead = UINT32_MAX;
1377 }
1378 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1379 Assert(GCPtrPC <= UINT32_MAX);
1380 }
1381
1382# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1383 /* Allow interpretation of patch manager code blocks since they can for
1384 instance throw #PFs for perfectly good reasons. */
1385 if (pVCpu->iem.s.fInPatchCode)
1386 {
1387 size_t cbRead = 0;
1388 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1389 AssertRCReturn(rc, rc);
1390 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1391 return VINF_SUCCESS;
1392 }
1393# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1394
1395 RTGCPHYS GCPhys;
1396 uint64_t fFlags;
1397 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1398 if (RT_SUCCESS(rc)) { /* probable */ }
1399 else
1400 {
1401 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1402 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1403 }
1404 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1405 else
1406 {
1407 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1408 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1409 }
1410 if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1411 else
1412 {
1413 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1414 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1415 }
1416 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1417 /** @todo Check reserved bits and such stuff. PGM is better at doing
1418 * that, so do it when implementing the guest virtual address
1419 * TLB... */
1420
1421# ifdef IEM_VERIFICATION_MODE_FULL
1422 /*
1423 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1424 * instruction.
1425 */
1426 /** @todo optimize this differently by not using PGMPhysRead. */
1427 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1428 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1429 if ( offPrevOpcodes < cbOldOpcodes
1430 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1431 {
1432 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1433 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1434 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1435 pVCpu->iem.s.cbOpcode = cbNew;
1436 return VINF_SUCCESS;
1437 }
1438# endif
1439
1440 /*
1441 * Read the bytes at this address.
1442 */
1443 PVM pVM = pVCpu->CTX_SUFF(pVM);
1444# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1445 size_t cbActual;
1446 if ( PATMIsEnabled(pVM)
1447 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1448 {
1449 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1450 Assert(cbActual > 0);
1451 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1452 }
1453 else
1454# endif
1455 {
1456 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1457 if (cbToTryRead > cbLeftOnPage)
1458 cbToTryRead = cbLeftOnPage;
1459 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1460 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1461
1462 if (!pVCpu->iem.s.fBypassHandlers)
1463 {
1464 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1465 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1466 { /* likely */ }
1467 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1468 {
1469 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1470 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1471 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1472 }
1473 else
1474 {
1475 Log((RT_SUCCESS(rcStrict)
1476 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1477 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1478 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1479 return rcStrict;
1480 }
1481 }
1482 else
1483 {
1484 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1485 if (RT_SUCCESS(rc))
1486 { /* likely */ }
1487 else
1488 {
1489 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1490                  GCPtrPC, GCPhys, cbToTryRead, rc));
1491 return rc;
1492 }
1493 }
1494 pVCpu->iem.s.cbOpcode = cbToTryRead;
1495 }
1496#endif /* !IEM_WITH_CODE_TLB */
1497 return VINF_SUCCESS;
1498}
1499
1500
1501/**
1502 * Invalidates the IEM TLBs.
1503 *
1504 * This is called internally as well as by PGM when moving GC mappings.
1505 *
1507 * @param pVCpu The cross context virtual CPU structure of the calling
1508 * thread.
1509 * @param fVmm Set when PGM calls us with a remapping.
1510 */
1511VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1512{
1513#ifdef IEM_WITH_CODE_TLB
1514 pVCpu->iem.s.cbInstrBufTotal = 0;
1515 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1516 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1517 { /* very likely */ }
1518 else
1519 {
1520 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1521 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1522 while (i-- > 0)
1523 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1524 }
1525#endif
1526
1527#ifdef IEM_WITH_DATA_TLB
1528 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1529 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1530 { /* very likely */ }
1531 else
1532 {
1533 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1534 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1535 while (i-- > 0)
1536 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1537 }
1538#endif
1539 NOREF(pVCpu); NOREF(fVmm);
1540}
1541
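/*
 * A note on the revision trick above (a sketch of the lookup side; the actual
 * lookups live in the opcode fetch / memory code further down): TLB tags are
 * stored as (page number | uTlbRevision), so bumping the revision makes every
 * stored tag stale without touching the 256 entries:
 *
 *     uint64_t const uTag  = (GCPtr >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
 *     PIEMTLBENTRY   pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
 *     bool const     fHit  = pTlbe->uTag == uTag;  <- stale revisions can never compare equal
 *
 * Only when the revision counter wraps back to zero do the tags have to be
 * cleared explicitly, which is what the unlikely branches above take care of.
 */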
1542
1543/**
1544 * Invalidates a page in the TLBs.
1545 *
1546 * @param pVCpu The cross context virtual CPU structure of the calling
1547 * thread.
1548 * @param GCPtr The address of the page to invalidate.
1549 */
1550VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1551{
1552#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1553 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1554 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1555 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1556 uintptr_t idx = (uint8_t)GCPtr;
1557
1558# ifdef IEM_WITH_CODE_TLB
1559 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1560 {
1561 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1562 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1563 pVCpu->iem.s.cbInstrBufTotal = 0;
1564 }
1565# endif
1566
1567# ifdef IEM_WITH_DATA_TLB
1568 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1569 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1570# endif
1571#else
1572 NOREF(pVCpu); NOREF(GCPtr);
1573#endif
1574}
1575
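/*
 * Illustration of the single-slot probe above (using the conventions of the
 * code TLB lookup later in this file): the TLBs are direct mapped with 256
 * entries, indexed by the low 8 bits of the guest page number and tagged with
 * the page number OR'ed with the current revision.  For GCPtr 0x7fff12345678:
 *
 *     page number : 0x7fff12345678 >> X86_PAGE_SHIFT  ->  0x7fff12345
 *     index       : (uint8_t)0x7fff12345              ->  0x45
 *     tag         : 0x7fff12345 | uTlbRevision
 *
 * which is why a single entry per TLB is all that needs checking here.
 */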
1576
1577/**
1578 * Invalidates the host physical aspects of the IEM TLBs.
1579 *
1580 * This is called internally as well as by PGM when moving GC mappings.
1581 *
1582 * @param pVCpu The cross context virtual CPU structure of the calling
1583 * thread.
1584 */
1585VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1586{
1587#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1588     /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1589
1590# ifdef IEM_WITH_CODE_TLB
1591 pVCpu->iem.s.cbInstrBufTotal = 0;
1592# endif
1593 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1594 if (uTlbPhysRev != 0)
1595 {
1596 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1597 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1598 }
1599 else
1600 {
1601 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1602 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1603
1604 unsigned i;
1605# ifdef IEM_WITH_CODE_TLB
1606 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1607 while (i-- > 0)
1608 {
1609 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1610 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1611 }
1612# endif
1613# ifdef IEM_WITH_DATA_TLB
1614 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1615 while (i-- > 0)
1616 {
1617 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1618 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1619 }
1620# endif
1621 }
1622#else
1623 NOREF(pVCpu);
1624#endif
1625}
1626
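/*
 * The physical revision above works like the virtual one, except that it lives
 * in the IEMTLBE_F_PHYS_REV bits of each entry's fFlagsAndPhysRev field.  The
 * consumers compare those bits against the per-TLB uTlbPhysRev, roughly as the
 * code TLB path in iemOpcodeFetchBytesJmp does below:
 *
 *     if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
 *         ... the cached pbMappingR3 / access flags are still usable ...
 *
 * so bumping uTlbPhysRev lazily invalidates the host mapping info, and only
 * the wrap-around case has to scrub the entries explicitly.
 */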
1627
1628/**
1629 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
1630 *
1631 * This is called internally as well as by PGM when moving GC mappings.
1632 *
1633 * @param pVM The cross context VM structure.
1634 *
1635 * @remarks Caller holds the PGM lock.
1636 */
1637VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1638{
1639 RT_NOREF_PV(pVM);
1640}
1641
1642#ifdef IEM_WITH_CODE_TLB
1643
1644/**
1645 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
1646 * failure and jumps.
1647 *
1648 * We end up here for a number of reasons:
1649 * - pbInstrBuf isn't yet initialized.
1650 * - Advancing beyond the buffer boundary (e.g. cross page).
1651 * - Advancing beyond the CS segment limit.
1652 * - Fetching from non-mappable page (e.g. MMIO).
1653 *
1654 * @param pVCpu The cross context virtual CPU structure of the
1655 * calling thread.
1656 * @param pvDst Where to return the bytes.
1657 * @param cbDst Number of bytes to read.
1658 *
1659 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1660 */
1661IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1662{
1663#ifdef IN_RING3
1664//__debugbreak();
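    /*
     * Rough flow of the loop below (just a summary, the code is authoritative):
     *   1. copy any bytes still left in pbInstrBuf,
     *   2. work out how far we may read given canonicality / the CS limit and
     *      the page boundary,
     *   3. look up or fill the code TLB entry for the page,
     *   4. check the user/NX page bits,
     *   5. read either directly via pbMappingR3 or through PGMPhysRead, and
     *   6. loop if the request continues on the next page.
     */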
1665 for (;;)
1666 {
1667 Assert(cbDst <= 8);
1668 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1669
1670 /*
1671 * We might have a partial buffer match, deal with that first to make the
1672 * rest simpler. This is the first part of the cross page/buffer case.
1673 */
1674 if (pVCpu->iem.s.pbInstrBuf != NULL)
1675 {
1676 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1677 {
1678 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1679 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1680 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1681
1682 cbDst -= cbCopy;
1683 pvDst = (uint8_t *)pvDst + cbCopy;
1684 offBuf += cbCopy;
1685             pVCpu->iem.s.offInstrNextByte = offBuf;
1686 }
1687 }
1688
1689 /*
1690 * Check segment limit, figuring how much we're allowed to access at this point.
1691 *
1692 * We will fault immediately if RIP is past the segment limit / in non-canonical
1693 * territory. If we do continue, there are one or more bytes to read before we
1694 * end up in trouble and we need to do that first before faulting.
1695 */
1696 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1697 RTGCPTR GCPtrFirst;
1698 uint32_t cbMaxRead;
1699 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1700 {
1701 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1702 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1703 { /* likely */ }
1704 else
1705 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1706 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1707 }
1708 else
1709 {
1710 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1711 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1712 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1713 { /* likely */ }
1714 else
1715 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1716 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1717 if (cbMaxRead != 0)
1718 { /* likely */ }
1719 else
1720 {
1721 /* Overflowed because address is 0 and limit is max. */
1722 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1723 cbMaxRead = X86_PAGE_SIZE;
1724 }
1725 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1726 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1727 if (cbMaxRead2 < cbMaxRead)
1728 cbMaxRead = cbMaxRead2;
1729 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1730 }
1731
1732 /*
1733 * Get the TLB entry for this piece of code.
1734 */
1735 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1736 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1737 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1738 if (pTlbe->uTag == uTag)
1739 {
1740 /* likely when executing lots of code, otherwise unlikely */
1741# ifdef VBOX_WITH_STATISTICS
1742 pVCpu->iem.s.CodeTlb.cTlbHits++;
1743# endif
1744 }
1745 else
1746 {
1747 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1748# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1749 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1750 {
1751 pTlbe->uTag = uTag;
1752 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1753                                       | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1754 pTlbe->GCPhys = NIL_RTGCPHYS;
1755 pTlbe->pbMappingR3 = NULL;
1756 }
1757 else
1758# endif
1759 {
1760 RTGCPHYS GCPhys;
1761 uint64_t fFlags;
1762 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1763 if (RT_FAILURE(rc))
1764 {
1765                 Log(("iemOpcodeFetchBytesJmp: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1766 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1767 }
1768
1769 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1770 pTlbe->uTag = uTag;
1771 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1772 pTlbe->GCPhys = GCPhys;
1773 pTlbe->pbMappingR3 = NULL;
1774 }
1775 }
1776
1777 /*
1778 * Check TLB page table level access flags.
1779 */
1780 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1781 {
1782 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1783 {
1784 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1785 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1786 }
1787 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1788 {
1789                 Log(("iemOpcodeFetchBytesJmp: %RGv - NX\n", GCPtrFirst));
1790 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1791 }
1792 }
1793
1794# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1795 /*
1796 * Allow interpretation of patch manager code blocks since they can for
1797 * instance throw #PFs for perfectly good reasons.
1798 */
1799 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1800 { /* no unlikely */ }
1801 else
1802 {
1803             /** @todo This could be optimized a little in ring-3 if we liked. */
1804 size_t cbRead = 0;
1805 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1806 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1807 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1808 return;
1809 }
1810# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1811
1812 /*
1813 * Look up the physical page info if necessary.
1814 */
1815 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1816 { /* not necessary */ }
1817 else
1818 {
1819 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1820 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1821 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1822 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1823 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1824 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1825 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1826 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1827 }
1828
1829# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1830 /*
1831         * Try to do a direct read using the pbMappingR3 pointer.
1832 */
1833 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1834 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1835 {
1836 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1837 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1838 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1839 {
1840 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1841 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1842 }
1843 else
1844 {
1845 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1846 Assert(cbInstr < cbMaxRead);
1847 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1848 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1849 }
1850 if (cbDst <= cbMaxRead)
1851 {
1852 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1853 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1854 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1855 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1856 return;
1857 }
1858 pVCpu->iem.s.pbInstrBuf = NULL;
1859
1860 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1861 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1862 }
1863 else
1864# endif
1865#if 0
1866 /*
1867         * If there is no special read handling, we can read a bit more and
1868 * put it in the prefetch buffer.
1869 */
1870 if ( cbDst < cbMaxRead
1871 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1872 {
1873 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1874 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1875 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1876 { /* likely */ }
1877 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1878 {
1879 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1880 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1881 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1882                 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1883 }
1884 else
1885 {
1886 Log((RT_SUCCESS(rcStrict)
1887 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1888 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1889 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1890 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1891 }
1892 }
1893 /*
1894 * Special read handling, so only read exactly what's needed.
1895 * This is a highly unlikely scenario.
1896 */
1897 else
1898#endif
1899 {
1900 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1901 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1902 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1903 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1904 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1905 { /* likely */ }
1906 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1907 {
1908                 Log(("iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1909                      GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1910 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1911 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1912 }
1913 else
1914 {
1915 Log((RT_SUCCESS(rcStrict)
1916                      ? "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1917                      : "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1918                      GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1919 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1920 }
1921 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1922 if (cbToRead == cbDst)
1923 return;
1924 }
1925
1926 /*
1927 * More to read, loop.
1928 */
1929 cbDst -= cbMaxRead;
1930 pvDst = (uint8_t *)pvDst + cbMaxRead;
1931 }
1932#else
1933 RT_NOREF(pvDst, cbDst);
1934 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1935#endif
1936}
1937
1938#else
1939
1940/**
1941 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1942 * exception if it fails.
1943 *
1944 * @returns Strict VBox status code.
1945 * @param pVCpu The cross context virtual CPU structure of the
1946 * calling thread.
1947 * @param cbMin The minimum number of bytes relative to offOpcode
1948 * that must be read.
1949 */
1950IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1951{
1952 /*
1953 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1954 *
1955 * First translate CS:rIP to a physical address.
1956 */
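    /*
     * Illustrative numbers for the clamping further down: with cbOpcode = 14,
     * offOpcode = 13 and cbMin = 2 we get cbLeft = 1, i.e. at least one more
     * byte is required.  cbToTryRead then gets limited by the segment limit
     * (or canonicality check), by the distance to the end of the page, and
     * finally by the space left in abOpcode (sizeof(abOpcode) - cbOpcode).
     */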
1957 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1958 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1959 uint32_t cbToTryRead;
1960 RTGCPTR GCPtrNext;
1961 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1962 {
1963 cbToTryRead = PAGE_SIZE;
1964 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1965 if (!IEM_IS_CANONICAL(GCPtrNext))
1966 return iemRaiseGeneralProtectionFault0(pVCpu);
1967 }
1968 else
1969 {
1970 uint32_t GCPtrNext32 = pCtx->eip;
1971 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1972 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1973 if (GCPtrNext32 > pCtx->cs.u32Limit)
1974 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1975 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1976 if (!cbToTryRead) /* overflowed */
1977 {
1978 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1979 cbToTryRead = UINT32_MAX;
1980 /** @todo check out wrapping around the code segment. */
1981 }
1982 if (cbToTryRead < cbMin - cbLeft)
1983 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1984 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1985 }
1986
1987 /* Only read up to the end of the page, and make sure we don't read more
1988 than the opcode buffer can hold. */
1989 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1990 if (cbToTryRead > cbLeftOnPage)
1991 cbToTryRead = cbLeftOnPage;
1992 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1993 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1994/** @todo r=bird: Convert assertion into undefined opcode exception? */
1995 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1996
1997# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1998 /* Allow interpretation of patch manager code blocks since they can for
1999 instance throw #PFs for perfectly good reasons. */
2000 if (pVCpu->iem.s.fInPatchCode)
2001 {
2002 size_t cbRead = 0;
2003 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2004 AssertRCReturn(rc, rc);
2005 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2006 return VINF_SUCCESS;
2007 }
2008# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2009
2010 RTGCPHYS GCPhys;
2011 uint64_t fFlags;
2012 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2013 if (RT_FAILURE(rc))
2014 {
2015 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2016 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2017 }
2018 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2019 {
2020 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2021 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2022 }
2023 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
2024 {
2025 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2026 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2027 }
2028 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2029 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2030 /** @todo Check reserved bits and such stuff. PGM is better at doing
2031 * that, so do it when implementing the guest virtual address
2032 * TLB... */
2033
2034 /*
2035 * Read the bytes at this address.
2036 *
2037 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2038 * and since PATM should only patch the start of an instruction there
2039 * should be no need to check again here.
2040 */
2041 if (!pVCpu->iem.s.fBypassHandlers)
2042 {
2043 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2044 cbToTryRead, PGMACCESSORIGIN_IEM);
2045 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2046 { /* likely */ }
2047 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2048 {
2049 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2050              GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2051 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2052 }
2053 else
2054 {
2055 Log((RT_SUCCESS(rcStrict)
2056 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2057 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2058                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2059 return rcStrict;
2060 }
2061 }
2062 else
2063 {
2064 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2065 if (RT_SUCCESS(rc))
2066 { /* likely */ }
2067 else
2068 {
2069 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2070 return rc;
2071 }
2072 }
2073 pVCpu->iem.s.cbOpcode += cbToTryRead;
2074 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2075
2076 return VINF_SUCCESS;
2077}
2078
2079#endif /* !IEM_WITH_CODE_TLB */
2080#ifndef IEM_WITH_SETJMP
2081
2082/**
2083 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2084 *
2085 * @returns Strict VBox status code.
2086 * @param pVCpu The cross context virtual CPU structure of the
2087 * calling thread.
2088 * @param pb Where to return the opcode byte.
2089 */
2090DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2091{
2092 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2093 if (rcStrict == VINF_SUCCESS)
2094 {
2095 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2096 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2097 pVCpu->iem.s.offOpcode = offOpcode + 1;
2098 }
2099 else
2100 *pb = 0;
2101 return rcStrict;
2102}
2103
2104
2105/**
2106 * Fetches the next opcode byte.
2107 *
2108 * @returns Strict VBox status code.
2109 * @param pVCpu The cross context virtual CPU structure of the
2110 * calling thread.
2111 * @param pu8 Where to return the opcode byte.
2112 */
2113DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2114{
2115 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2116 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2117 {
2118 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2119 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2120 return VINF_SUCCESS;
2121 }
2122 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2123}
2124
2125#else /* IEM_WITH_SETJMP */
2126
2127/**
2128 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2129 *
2130 * @returns The opcode byte.
2131 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2132 */
2133DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2134{
2135# ifdef IEM_WITH_CODE_TLB
2136 uint8_t u8;
2137 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2138 return u8;
2139# else
2140 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2141 if (rcStrict == VINF_SUCCESS)
2142 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2143 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2144# endif
2145}
2146
2147
2148/**
2149 * Fetches the next opcode byte, longjmp on error.
2150 *
2151 * @returns The opcode byte.
2152 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2153 */
2154DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2155{
2156# ifdef IEM_WITH_CODE_TLB
2157 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2158 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2159 if (RT_LIKELY( pbBuf != NULL
2160 && offBuf < pVCpu->iem.s.cbInstrBuf))
2161 {
2162 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2163 return pbBuf[offBuf];
2164 }
2165# else
2166 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2167 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2168 {
2169 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2170 return pVCpu->iem.s.abOpcode[offOpcode];
2171 }
2172# endif
2173 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2174}
2175
2176#endif /* IEM_WITH_SETJMP */
2177
2178/**
2179 * Fetches the next opcode byte, returns automatically on failure.
2180 *
2181 * @param a_pu8 Where to return the opcode byte.
2182 * @remark Implicitly references pVCpu.
2183 */
2184#ifndef IEM_WITH_SETJMP
2185# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2186 do \
2187 { \
2188 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2189 if (rcStrict2 == VINF_SUCCESS) \
2190 { /* likely */ } \
2191 else \
2192 return rcStrict2; \
2193 } while (0)
2194#else
2195# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2196#endif /* IEM_WITH_SETJMP */
2197
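/*
 * Typical use of the IEM_OPCODE_GET_NEXT_* family (sketch only; the function
 * name below is made up for illustration and is not how the decoder tables
 * actually declare their handlers):
 *
 *     IEM_STATIC VBOXSTRICTRC iemOp_ExampleWithImm8(PVMCPU pVCpu)
 *     {
 *         uint8_t bImm;
 *         IEM_OPCODE_GET_NEXT_U8(&bImm);  <- returns or longjmps on fetch failure
 *         ...
 *         return VINF_SUCCESS;
 *     }
 *
 * The same pattern applies to the U16/U32/U64 and the sign/zero extending
 * variants defined below.
 */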
2198
2199#ifndef IEM_WITH_SETJMP
2200/**
2201 * Fetches the next signed byte from the opcode stream.
2202 *
2203 * @returns Strict VBox status code.
2204 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2205 * @param pi8 Where to return the signed byte.
2206 */
2207DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2208{
2209 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2210}
2211#endif /* !IEM_WITH_SETJMP */
2212
2213
2214/**
2215 * Fetches the next signed byte from the opcode stream, returning automatically
2216 * on failure.
2217 *
2218 * @param a_pi8 Where to return the signed byte.
2219 * @remark Implicitly references pVCpu.
2220 */
2221#ifndef IEM_WITH_SETJMP
2222# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2223 do \
2224 { \
2225 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2226 if (rcStrict2 != VINF_SUCCESS) \
2227 return rcStrict2; \
2228 } while (0)
2229#else /* IEM_WITH_SETJMP */
2230# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2231
2232#endif /* IEM_WITH_SETJMP */
2233
2234#ifndef IEM_WITH_SETJMP
2235
2236/**
2237 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2238 *
2239 * @returns Strict VBox status code.
2240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2241 * @param pu16 Where to return the opcode word.
2242 */
2243DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2244{
2245 uint8_t u8;
2246 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2247 if (rcStrict == VINF_SUCCESS)
2248 *pu16 = (int8_t)u8;
2249 return rcStrict;
2250}
2251
2252
2253/**
2254 * Fetches the next signed byte from the opcode stream, extending it to
2255 * unsigned 16-bit.
2256 *
2257 * @returns Strict VBox status code.
2258 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2259 * @param pu16 Where to return the unsigned word.
2260 */
2261DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2262{
2263 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2264 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2265 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2266
2267 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2268 pVCpu->iem.s.offOpcode = offOpcode + 1;
2269 return VINF_SUCCESS;
2270}
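
/*
 * Worked example for the sign extension above: an opcode byte of 0x80 becomes
 * (int8_t)-128 and is then implicitly widened for the uint16_t assignment, so
 * *pu16 ends up as 0xFF80; a byte of 0x7F stays 0x007F.  The S8SxU32 and
 * S8SxU64 variants below rely on exactly the same C conversion rules.
 */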
2271
2272#endif /* !IEM_WITH_SETJMP */
2273
2274/**
2275 * Fetches the next signed byte from the opcode stream and sign-extends it to
2276 * a word, returning automatically on failure.
2277 *
2278 * @param a_pu16 Where to return the word.
2279 * @remark Implicitly references pVCpu.
2280 */
2281#ifndef IEM_WITH_SETJMP
2282# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2283 do \
2284 { \
2285 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2286 if (rcStrict2 != VINF_SUCCESS) \
2287 return rcStrict2; \
2288 } while (0)
2289#else
2290# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2291#endif
2292
2293#ifndef IEM_WITH_SETJMP
2294
2295/**
2296 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2297 *
2298 * @returns Strict VBox status code.
2299 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2300 * @param pu32 Where to return the opcode dword.
2301 */
2302DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2303{
2304 uint8_t u8;
2305 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2306 if (rcStrict == VINF_SUCCESS)
2307 *pu32 = (int8_t)u8;
2308 return rcStrict;
2309}
2310
2311
2312/**
2313 * Fetches the next signed byte from the opcode stream, extending it to
2314 * unsigned 32-bit.
2315 *
2316 * @returns Strict VBox status code.
2317 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2318 * @param pu32 Where to return the unsigned dword.
2319 */
2320DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2321{
2322 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2323 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2324 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2325
2326 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2327 pVCpu->iem.s.offOpcode = offOpcode + 1;
2328 return VINF_SUCCESS;
2329}
2330
2331#endif /* !IEM_WITH_SETJMP */
2332
2333/**
2334 * Fetches the next signed byte from the opcode stream and sign-extends it to
2335 * a double word, returning automatically on failure.
2336 *
2337 * @param a_pu32 Where to return the double word.
2338 * @remark Implicitly references pVCpu.
2339 */
2340#ifndef IEM_WITH_SETJMP
2341# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2342 do \
2343 { \
2344 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2345 if (rcStrict2 != VINF_SUCCESS) \
2346 return rcStrict2; \
2347 } while (0)
2348#else
2349# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2350#endif
2351
2352#ifndef IEM_WITH_SETJMP
2353
2354/**
2355 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2356 *
2357 * @returns Strict VBox status code.
2358 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2359 * @param pu64 Where to return the opcode qword.
2360 */
2361DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2362{
2363 uint8_t u8;
2364 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2365 if (rcStrict == VINF_SUCCESS)
2366 *pu64 = (int8_t)u8;
2367 return rcStrict;
2368}
2369
2370
2371/**
2372 * Fetches the next signed byte from the opcode stream, extending it to
2373 * unsigned 64-bit.
2374 *
2375 * @returns Strict VBox status code.
2376 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2377 * @param pu64 Where to return the unsigned qword.
2378 */
2379DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2380{
2381 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2382 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2383 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2384
2385 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2386 pVCpu->iem.s.offOpcode = offOpcode + 1;
2387 return VINF_SUCCESS;
2388}
2389
2390#endif /* !IEM_WITH_SETJMP */
2391
2392
2393/**
2394 * Fetches the next signed byte from the opcode stream and sign-extends it to
2395 * a quad word, returning automatically on failure.
2396 *
2397 * @param a_pu64 Where to return the quad word.
2398 * @remark Implicitly references pVCpu.
2399 */
2400#ifndef IEM_WITH_SETJMP
2401# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2402 do \
2403 { \
2404 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2405 if (rcStrict2 != VINF_SUCCESS) \
2406 return rcStrict2; \
2407 } while (0)
2408#else
2409# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2410#endif
2411
2412
2413#ifndef IEM_WITH_SETJMP
2414
2415/**
2416 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2417 *
2418 * @returns Strict VBox status code.
2419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2420 * @param pu16 Where to return the opcode word.
2421 */
2422DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2423{
2424 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2425 if (rcStrict == VINF_SUCCESS)
2426 {
2427 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2428# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2429 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2430# else
2431 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2432# endif
2433 pVCpu->iem.s.offOpcode = offOpcode + 2;
2434 }
2435 else
2436 *pu16 = 0;
2437 return rcStrict;
2438}
2439
2440
2441/**
2442 * Fetches the next opcode word.
2443 *
2444 * @returns Strict VBox status code.
2445 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2446 * @param pu16 Where to return the opcode word.
2447 */
2448DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2449{
2450 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2451 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2452 {
2453 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2454# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2455 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2456# else
2457 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2458# endif
2459 return VINF_SUCCESS;
2460 }
2461 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2462}
2463
2464#else /* IEM_WITH_SETJMP */
2465
2466/**
2467 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
2468 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2469 * @returns The opcode word.
2470 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2471 */
2472DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2473{
2474# ifdef IEM_WITH_CODE_TLB
2475 uint16_t u16;
2476 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2477 return u16;
2478# else
2479 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2480 if (rcStrict == VINF_SUCCESS)
2481 {
2482 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2483 pVCpu->iem.s.offOpcode += 2;
2484# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2485 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2486# else
2487 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2488# endif
2489 }
2490 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2491# endif
2492}
2493
2494
2495/**
2496 * Fetches the next opcode word, longjmp on error.
2497 *
2498 * @returns The opcode word.
2499 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2500 */
2501DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2502{
2503# ifdef IEM_WITH_CODE_TLB
2504 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2505 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2506 if (RT_LIKELY( pbBuf != NULL
2507 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2508 {
2509 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2510# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2511 return *(uint16_t const *)&pbBuf[offBuf];
2512# else
2513 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2514# endif
2515 }
2516# else
2517 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2518 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2519 {
2520 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2521# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2522 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2523# else
2524 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2525# endif
2526 }
2527# endif
2528 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2529}
2530
2531#endif /* IEM_WITH_SETJMP */
2532
2533
2534/**
2535 * Fetches the next opcode word, returns automatically on failure.
2536 *
2537 * @param a_pu16 Where to return the opcode word.
2538 * @remark Implicitly references pVCpu.
2539 */
2540#ifndef IEM_WITH_SETJMP
2541# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2542 do \
2543 { \
2544 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2545 if (rcStrict2 != VINF_SUCCESS) \
2546 return rcStrict2; \
2547 } while (0)
2548#else
2549# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2550#endif
2551
2552#ifndef IEM_WITH_SETJMP
2553
2554/**
2555 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2556 *
2557 * @returns Strict VBox status code.
2558 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2559 * @param pu32 Where to return the opcode double word.
2560 */
2561DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2562{
2563 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2564 if (rcStrict == VINF_SUCCESS)
2565 {
2566 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2567 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2568 pVCpu->iem.s.offOpcode = offOpcode + 2;
2569 }
2570 else
2571 *pu32 = 0;
2572 return rcStrict;
2573}
2574
2575
2576/**
2577 * Fetches the next opcode word, zero extending it to a double word.
2578 *
2579 * @returns Strict VBox status code.
2580 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2581 * @param pu32 Where to return the opcode double word.
2582 */
2583DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2584{
2585 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2586 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2587 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2588
2589 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2590 pVCpu->iem.s.offOpcode = offOpcode + 2;
2591 return VINF_SUCCESS;
2592}
2593
2594#endif /* !IEM_WITH_SETJMP */
2595
2596
2597/**
2598 * Fetches the next opcode word and zero extends it to a double word, returns
2599 * automatically on failure.
2600 *
2601 * @param a_pu32 Where to return the opcode double word.
2602 * @remark Implicitly references pVCpu.
2603 */
2604#ifndef IEM_WITH_SETJMP
2605# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2606 do \
2607 { \
2608 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2609 if (rcStrict2 != VINF_SUCCESS) \
2610 return rcStrict2; \
2611 } while (0)
2612#else
2613# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2614#endif
2615
2616#ifndef IEM_WITH_SETJMP
2617
2618/**
2619 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2620 *
2621 * @returns Strict VBox status code.
2622 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2623 * @param pu64 Where to return the opcode quad word.
2624 */
2625DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2626{
2627 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2628 if (rcStrict == VINF_SUCCESS)
2629 {
2630 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2631 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2632 pVCpu->iem.s.offOpcode = offOpcode + 2;
2633 }
2634 else
2635 *pu64 = 0;
2636 return rcStrict;
2637}
2638
2639
2640/**
2641 * Fetches the next opcode word, zero extending it to a quad word.
2642 *
2643 * @returns Strict VBox status code.
2644 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2645 * @param pu64 Where to return the opcode quad word.
2646 */
2647DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2648{
2649 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2650 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2651 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2652
2653 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2654 pVCpu->iem.s.offOpcode = offOpcode + 2;
2655 return VINF_SUCCESS;
2656}
2657
2658#endif /* !IEM_WITH_SETJMP */
2659
2660/**
2661 * Fetches the next opcode word and zero extends it to a quad word, returns
2662 * automatically on failure.
2663 *
2664 * @param a_pu64 Where to return the opcode quad word.
2665 * @remark Implicitly references pVCpu.
2666 */
2667#ifndef IEM_WITH_SETJMP
2668# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2669 do \
2670 { \
2671 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2672 if (rcStrict2 != VINF_SUCCESS) \
2673 return rcStrict2; \
2674 } while (0)
2675#else
2676# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2677#endif
2678
2679
2680#ifndef IEM_WITH_SETJMP
2681/**
2682 * Fetches the next signed word from the opcode stream.
2683 *
2684 * @returns Strict VBox status code.
2685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2686 * @param pi16 Where to return the signed word.
2687 */
2688DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2689{
2690 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2691}
2692#endif /* !IEM_WITH_SETJMP */
2693
2694
2695/**
2696 * Fetches the next signed word from the opcode stream, returning automatically
2697 * on failure.
2698 *
2699 * @param a_pi16 Where to return the signed word.
2700 * @remark Implicitly references pVCpu.
2701 */
2702#ifndef IEM_WITH_SETJMP
2703# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2704 do \
2705 { \
2706 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2707 if (rcStrict2 != VINF_SUCCESS) \
2708 return rcStrict2; \
2709 } while (0)
2710#else
2711# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2712#endif
2713
2714#ifndef IEM_WITH_SETJMP
2715
2716/**
2717 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2718 *
2719 * @returns Strict VBox status code.
2720 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2721 * @param pu32 Where to return the opcode dword.
2722 */
2723DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2724{
2725 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2726 if (rcStrict == VINF_SUCCESS)
2727 {
2728 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2729# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2730 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2731# else
2732 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2733 pVCpu->iem.s.abOpcode[offOpcode + 1],
2734 pVCpu->iem.s.abOpcode[offOpcode + 2],
2735 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2736# endif
2737 pVCpu->iem.s.offOpcode = offOpcode + 4;
2738 }
2739 else
2740 *pu32 = 0;
2741 return rcStrict;
2742}
2743
2744
2745/**
2746 * Fetches the next opcode dword.
2747 *
2748 * @returns Strict VBox status code.
2749 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2750 * @param pu32 Where to return the opcode double word.
2751 */
2752DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2753{
2754 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2755 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2756 {
2757 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2758# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2759 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2760# else
2761 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2762 pVCpu->iem.s.abOpcode[offOpcode + 1],
2763 pVCpu->iem.s.abOpcode[offOpcode + 2],
2764 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2765# endif
2766 return VINF_SUCCESS;
2767 }
2768 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2769}
2770
2771#else  /* IEM_WITH_SETJMP */
2772
2773/**
2774 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2775 *
2776 * @returns The opcode dword.
2777 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2778 */
2779DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2780{
2781# ifdef IEM_WITH_CODE_TLB
2782 uint32_t u32;
2783 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2784 return u32;
2785# else
2786 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2787 if (rcStrict == VINF_SUCCESS)
2788 {
2789 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2790 pVCpu->iem.s.offOpcode = offOpcode + 4;
2791# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2792 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2793# else
2794 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2795 pVCpu->iem.s.abOpcode[offOpcode + 1],
2796 pVCpu->iem.s.abOpcode[offOpcode + 2],
2797 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2798# endif
2799 }
2800 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2801# endif
2802}
2803
2804
2805/**
2806 * Fetches the next opcode dword, longjmp on error.
2807 *
2808 * @returns The opcode dword.
2809 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2810 */
2811DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2812{
2813# ifdef IEM_WITH_CODE_TLB
2814 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2815 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2816 if (RT_LIKELY( pbBuf != NULL
2817 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2818 {
2819 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2820# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2821 return *(uint32_t const *)&pbBuf[offBuf];
2822# else
2823 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2824 pbBuf[offBuf + 1],
2825 pbBuf[offBuf + 2],
2826 pbBuf[offBuf + 3]);
2827# endif
2828 }
2829# else
2830 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2831 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2832 {
2833 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2834# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2835 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2836# else
2837 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2838 pVCpu->iem.s.abOpcode[offOpcode + 1],
2839 pVCpu->iem.s.abOpcode[offOpcode + 2],
2840 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2841# endif
2842 }
2843# endif
2844 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2845}
2846
2847#endif /* IEM_WITH_SETJMP */
2848
2849
2850/**
2851 * Fetches the next opcode dword, returns automatically on failure.
2852 *
2853 * @param a_pu32 Where to return the opcode dword.
2854 * @remark Implicitly references pVCpu.
2855 */
2856#ifndef IEM_WITH_SETJMP
2857# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2858 do \
2859 { \
2860 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2861 if (rcStrict2 != VINF_SUCCESS) \
2862 return rcStrict2; \
2863 } while (0)
2864#else
2865# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2866#endif
2867
2868#ifndef IEM_WITH_SETJMP
2869
2870/**
2871 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2872 *
2873 * @returns Strict VBox status code.
2874 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2875 * @param pu64 Where to return the opcode quad word.
2876 */
2877DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2878{
2879 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2880 if (rcStrict == VINF_SUCCESS)
2881 {
2882 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2883 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2884 pVCpu->iem.s.abOpcode[offOpcode + 1],
2885 pVCpu->iem.s.abOpcode[offOpcode + 2],
2886 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2887 pVCpu->iem.s.offOpcode = offOpcode + 4;
2888 }
2889 else
2890 *pu64 = 0;
2891 return rcStrict;
2892}
2893
2894
2895/**
2896 * Fetches the next opcode dword, zero extending it to a quad word.
2897 *
2898 * @returns Strict VBox status code.
2899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2900 * @param pu64 Where to return the opcode quad word.
2901 */
2902DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2903{
2904 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2905 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2906 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2907
2908 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2909 pVCpu->iem.s.abOpcode[offOpcode + 1],
2910 pVCpu->iem.s.abOpcode[offOpcode + 2],
2911 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2912 pVCpu->iem.s.offOpcode = offOpcode + 4;
2913 return VINF_SUCCESS;
2914}
2915
2916#endif /* !IEM_WITH_SETJMP */
2917
2918
2919/**
2920 * Fetches the next opcode dword and zero extends it to a quad word, returns
2921 * automatically on failure.
2922 *
2923 * @param a_pu64 Where to return the opcode quad word.
2924 * @remark Implicitly references pVCpu.
2925 */
2926#ifndef IEM_WITH_SETJMP
2927# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2928 do \
2929 { \
2930 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2931 if (rcStrict2 != VINF_SUCCESS) \
2932 return rcStrict2; \
2933 } while (0)
2934#else
2935# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2936#endif
2937
2938
2939#ifndef IEM_WITH_SETJMP
2940/**
2941 * Fetches the next signed double word from the opcode stream.
2942 *
2943 * @returns Strict VBox status code.
2944 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2945 * @param pi32 Where to return the signed double word.
2946 */
2947DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2948{
2949 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2950}
2951#endif
2952
2953/**
2954 * Fetches the next signed double word from the opcode stream, returning
2955 * automatically on failure.
2956 *
2957 * @param a_pi32 Where to return the signed double word.
2958 * @remark Implicitly references pVCpu.
2959 */
2960#ifndef IEM_WITH_SETJMP
2961# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2962 do \
2963 { \
2964 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2965 if (rcStrict2 != VINF_SUCCESS) \
2966 return rcStrict2; \
2967 } while (0)
2968#else
2969# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2970#endif
2971
2972#ifndef IEM_WITH_SETJMP
2973
2974/**
2975 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2976 *
2977 * @returns Strict VBox status code.
2978 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2979 * @param pu64 Where to return the opcode qword.
2980 */
2981DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2982{
2983 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2984 if (rcStrict == VINF_SUCCESS)
2985 {
2986 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2987 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2988 pVCpu->iem.s.abOpcode[offOpcode + 1],
2989 pVCpu->iem.s.abOpcode[offOpcode + 2],
2990 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2991 pVCpu->iem.s.offOpcode = offOpcode + 4;
2992 }
2993 else
2994 *pu64 = 0;
2995 return rcStrict;
2996}
2997
2998
2999/**
3000 * Fetches the next opcode dword, sign extending it into a quad word.
3001 *
3002 * @returns Strict VBox status code.
3003 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3004 * @param pu64 Where to return the opcode quad word.
3005 */
3006DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3007{
3008 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3009 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3010 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3011
3012 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3013 pVCpu->iem.s.abOpcode[offOpcode + 1],
3014 pVCpu->iem.s.abOpcode[offOpcode + 2],
3015 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3016 *pu64 = i32;
3017 pVCpu->iem.s.offOpcode = offOpcode + 4;
3018 return VINF_SUCCESS;
3019}
3020
3021#endif /* !IEM_WITH_SETJMP */
3022
3023
3024/**
3025 * Fetches the next opcode double word and sign extends it to a quad word,
3026 * returns automatically on failure.
3027 *
3028 * @param a_pu64 Where to return the opcode quad word.
3029 * @remark Implicitly references pVCpu.
3030 */
3031#ifndef IEM_WITH_SETJMP
3032# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3033 do \
3034 { \
3035 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3036 if (rcStrict2 != VINF_SUCCESS) \
3037 return rcStrict2; \
3038 } while (0)
3039#else
3040# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3041#endif
3042
3043#ifndef IEM_WITH_SETJMP
3044
3045/**
3046 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3047 *
3048 * @returns Strict VBox status code.
3049 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3050 * @param pu64 Where to return the opcode qword.
3051 */
3052DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3053{
3054 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3055 if (rcStrict == VINF_SUCCESS)
3056 {
3057 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3058# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3059 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3060# else
3061 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3062 pVCpu->iem.s.abOpcode[offOpcode + 1],
3063 pVCpu->iem.s.abOpcode[offOpcode + 2],
3064 pVCpu->iem.s.abOpcode[offOpcode + 3],
3065 pVCpu->iem.s.abOpcode[offOpcode + 4],
3066 pVCpu->iem.s.abOpcode[offOpcode + 5],
3067 pVCpu->iem.s.abOpcode[offOpcode + 6],
3068 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3069# endif
3070 pVCpu->iem.s.offOpcode = offOpcode + 8;
3071 }
3072 else
3073 *pu64 = 0;
3074 return rcStrict;
3075}
3076
3077
3078/**
3079 * Fetches the next opcode qword.
3080 *
3081 * @returns Strict VBox status code.
3082 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3083 * @param pu64 Where to return the opcode qword.
3084 */
3085DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3086{
3087 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3088 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3089 {
3090# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3091 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3092# else
3093 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3094 pVCpu->iem.s.abOpcode[offOpcode + 1],
3095 pVCpu->iem.s.abOpcode[offOpcode + 2],
3096 pVCpu->iem.s.abOpcode[offOpcode + 3],
3097 pVCpu->iem.s.abOpcode[offOpcode + 4],
3098 pVCpu->iem.s.abOpcode[offOpcode + 5],
3099 pVCpu->iem.s.abOpcode[offOpcode + 6],
3100 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3101# endif
3102 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3103 return VINF_SUCCESS;
3104 }
3105 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3106}
3107
3108#else /* IEM_WITH_SETJMP */
3109
3110/**
3111 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3112 *
3113 * @returns The opcode qword.
3114 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3115 */
3116DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3117{
3118# ifdef IEM_WITH_CODE_TLB
3119 uint64_t u64;
3120 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3121 return u64;
3122# else
3123 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3124 if (rcStrict == VINF_SUCCESS)
3125 {
3126 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3127 pVCpu->iem.s.offOpcode = offOpcode + 8;
3128# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3129 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3130# else
3131 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3132 pVCpu->iem.s.abOpcode[offOpcode + 1],
3133 pVCpu->iem.s.abOpcode[offOpcode + 2],
3134 pVCpu->iem.s.abOpcode[offOpcode + 3],
3135 pVCpu->iem.s.abOpcode[offOpcode + 4],
3136 pVCpu->iem.s.abOpcode[offOpcode + 5],
3137 pVCpu->iem.s.abOpcode[offOpcode + 6],
3138 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3139# endif
3140 }
3141 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3142# endif
3143}
3144
3145
3146/**
3147 * Fetches the next opcode qword, longjmp on error.
3148 *
3149 * @returns The opcode qword.
3150 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3151 */
3152DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3153{
3154# ifdef IEM_WITH_CODE_TLB
3155 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3156 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3157 if (RT_LIKELY( pbBuf != NULL
3158 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3159 {
3160 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3161# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3162 return *(uint64_t const *)&pbBuf[offBuf];
3163# else
3164 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3165 pbBuf[offBuf + 1],
3166 pbBuf[offBuf + 2],
3167 pbBuf[offBuf + 3],
3168 pbBuf[offBuf + 4],
3169 pbBuf[offBuf + 5],
3170 pbBuf[offBuf + 6],
3171 pbBuf[offBuf + 7]);
3172# endif
3173 }
3174# else
3175 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3176 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3177 {
3178 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3179# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3180 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3181# else
3182 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3183 pVCpu->iem.s.abOpcode[offOpcode + 1],
3184 pVCpu->iem.s.abOpcode[offOpcode + 2],
3185 pVCpu->iem.s.abOpcode[offOpcode + 3],
3186 pVCpu->iem.s.abOpcode[offOpcode + 4],
3187 pVCpu->iem.s.abOpcode[offOpcode + 5],
3188 pVCpu->iem.s.abOpcode[offOpcode + 6],
3189 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3190# endif
3191 }
3192# endif
3193 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3194}
3195
3196#endif /* IEM_WITH_SETJMP */
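/*
 * Note on the structure of the qword opcode fetchers above: both the status-code
 * (non-setjmp) and the longjmp build variants keep an inlined fast path that
 * serves the request straight from the already buffered opcode bytes, and only
 * fall back to the out-of-line *Slow / *SlowJmp workers when fewer than eight
 * bytes remain buffered (or, with IEM_WITH_CODE_TLB, when the instruction buffer
 * is absent or exhausted).
 */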
3197
3198/**
3199 * Fetches the next opcode quad word, returns automatically on failure.
3200 *
3201 * @param a_pu64 Where to return the opcode quad word.
3202 * @remark Implicitly references pVCpu.
3203 */
3204#ifndef IEM_WITH_SETJMP
3205# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3206 do \
3207 { \
3208 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3209 if (rcStrict2 != VINF_SUCCESS) \
3210 return rcStrict2; \
3211 } while (0)
3212#else
3213# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3214#endif
3215
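/*
 * Illustrative sketch only (not part of the build): a decoder routine would
 * typically pull a 64-bit immediate via the macro like this; the variable name
 * u64Imm is hypothetical, only the macro usage mirrors the definitions above.
 *
 *      uint64_t u64Imm;
 *      IEM_OPCODE_GET_NEXT_U64(&u64Imm);  // returns the strict status code on
 *                                         // failure, or longjmps in setjmp builds.
 */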
3216
3217/** @name Misc Worker Functions.
3218 * @{
3219 */
3220
3221/**
3222 * Gets the exception class for the specified exception vector.
3223 *
3224 * @returns The class of the specified exception.
3225 * @param uVector The exception vector.
3226 */
3227IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3228{
3229 Assert(uVector <= X86_XCPT_LAST);
3230 switch (uVector)
3231 {
3232 case X86_XCPT_DE:
3233 case X86_XCPT_TS:
3234 case X86_XCPT_NP:
3235 case X86_XCPT_SS:
3236 case X86_XCPT_GP:
3237 case X86_XCPT_SX: /* AMD only */
3238 return IEMXCPTCLASS_CONTRIBUTORY;
3239
3240 case X86_XCPT_PF:
3241 case X86_XCPT_VE: /* Intel only */
3242 return IEMXCPTCLASS_PAGE_FAULT;
3243
3244 case X86_XCPT_DF:
3245 return IEMXCPTCLASS_DOUBLE_FAULT;
3246 }
3247 return IEMXCPTCLASS_BENIGN;
3248}
3249
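/*
 * The classification above feeds the benign / contributory / page-fault
 * escalation rules from the Intel and AMD manuals: a contributory exception
 * raised while delivering a contributory one, or a page fault / contributory
 * exception raised while delivering a page fault, escalates to #DF, and a
 * contributory exception or page fault raised while delivering #DF escalates
 * to a triple fault (shutdown).  IEMEvaluateRecursiveXcpt below implements
 * exactly this matrix.
 */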
3250
3251/**
3252 * Evaluates how to handle an exception caused during delivery of another event
3253 * (exception / interrupt).
3254 *
3255 * @returns How to handle the recursive exception.
3256 * @param pVCpu The cross context virtual CPU structure of the
3257 * calling thread.
3258 * @param fPrevFlags The flags of the previous event.
3259 * @param uPrevVector The vector of the previous event.
3260 * @param fCurFlags The flags of the current exception.
3261 * @param uCurVector The vector of the current exception.
3262 * @param pfXcptRaiseInfo Where to store additional information about the
3263 * exception condition. Optional.
3264 */
3265VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3266 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3267{
3268 /*
3269 * Only CPU exceptions can be raised while delivering other events; software interrupt
3270 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3271 */
3272 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3273 Assert(pVCpu); RT_NOREF(pVCpu);
3274 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3275
3276 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3277 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3278 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3279 {
3280 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3281 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3282 {
3283 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3284 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3285 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3286 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3287 {
3288 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3289 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3290 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3291 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3292 uCurVector, IEM_GET_CTX(pVCpu)->cr2));
3293 }
3294 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3295 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3296 {
3297 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3298 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3299 }
3300 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3301 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3302 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3303 {
3304 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3305 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3306 }
3307 }
3308 else
3309 {
3310 if (uPrevVector == X86_XCPT_NMI)
3311 {
3312 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3313 if (uCurVector == X86_XCPT_PF)
3314 {
3315 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3316 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3317 }
3318 }
3319 else if ( uPrevVector == X86_XCPT_AC
3320 && uCurVector == X86_XCPT_AC)
3321 {
3322 enmRaise = IEMXCPTRAISE_CPU_HANG;
3323 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3324 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3325 }
3326 }
3327 }
3328 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3329 {
3330 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3331 if (uCurVector == X86_XCPT_PF)
3332 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3333 }
3334 else
3335 {
3336 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3337 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3338 }
3339
3340 if (pfXcptRaiseInfo)
3341 *pfXcptRaiseInfo = fRaiseInfo;
3342 return enmRaise;
3343}
3344
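/*
 * Usage sketch (hypothetical caller, for illustration only): an event delivery
 * path that takes a #GP while injecting a #PF could consult the helper roughly
 * like this; per the rules above the verdict is a double fault:
 *
 *      IEMXCPTRAISEINFO fRaiseInfo;
 *      IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                       IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                       IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                       &fRaiseInfo);
 *      // enmRaise == IEMXCPTRAISE_DOUBLE_FAULT,
 *      // fRaiseInfo == IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT.
 */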
3345
3346/**
3347 * Enters the CPU shutdown state initiated by a triple fault or other
3348 * unrecoverable conditions.
3349 *
3350 * @returns Strict VBox status code.
3351 * @param pVCpu The cross context virtual CPU structure of the
3352 * calling thread.
3353 */
3354IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3355{
3356 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3357 {
3358 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3359 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3360 }
3361
3362 RT_NOREF(pVCpu);
3363 return VINF_EM_TRIPLE_FAULT;
3364}
3365
3366
3367/**
3368 * Validates a new SS segment.
3369 *
3370 * @returns VBox strict status code.
3371 * @param pVCpu The cross context virtual CPU structure of the
3372 * calling thread.
3373 * @param pCtx The CPU context.
3374 * @param NewSS The new SS selector.
3375 * @param uCpl The CPL to load the stack for.
3376 * @param pDesc Where to return the descriptor.
3377 */
3378IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3379{
3380 NOREF(pCtx);
3381
3382 /* Null selectors are not allowed (we're not called for dispatching
3383 interrupts with SS=0 in long mode). */
3384 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3385 {
3386 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3387 return iemRaiseTaskSwitchFault0(pVCpu);
3388 }
3389
3390 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3391 if ((NewSS & X86_SEL_RPL) != uCpl)
3392 {
3393 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3394 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3395 }
3396
3397 /*
3398 * Read the descriptor.
3399 */
3400 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3401 if (rcStrict != VINF_SUCCESS)
3402 return rcStrict;
3403
3404 /*
3405 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3406 */
3407 if (!pDesc->Legacy.Gen.u1DescType)
3408 {
3409 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3410 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3411 }
3412
3413 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3414 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3415 {
3416 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3417 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3418 }
3419 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3420 {
3421 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3422 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3423 }
3424
3425 /* Is it there? */
3426 /** @todo testcase: Is this checked before the canonical / limit check below? */
3427 if (!pDesc->Legacy.Gen.u1Present)
3428 {
3429 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3430 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3431 }
3432
3433 return VINF_SUCCESS;
3434}
3435
3436
3437/**
3438 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3439 * not.
3440 *
3441 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3442 * @param a_pCtx The CPU context.
3443 */
3444#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3445# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3446 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3447 ? (a_pCtx)->eflags.u \
3448 : CPUMRawGetEFlags(a_pVCpu) )
3449#else
3450# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3451 ( (a_pCtx)->eflags.u )
3452#endif
3453
3454/**
3455 * Updates the EFLAGS in the correct manner wrt. PATM.
3456 *
3457 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3458 * @param a_pCtx The CPU context.
3459 * @param a_fEfl The new EFLAGS.
3460 */
3461#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3462# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3463 do { \
3464 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3465 (a_pCtx)->eflags.u = (a_fEfl); \
3466 else \
3467 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3468 } while (0)
3469#else
3470# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3471 do { \
3472 (a_pCtx)->eflags.u = (a_fEfl); \
3473 } while (0)
3474#endif
3475
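/*
 * The two macros above are typically used as a read-modify-write pair, as the
 * exception dispatching code further down does:
 *
 *      uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
 *      fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
 *      IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
 *
 * so that the PATM-aware raw-mode path and the plain path stay in sync.
 */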
3476
3477/** @} */
3478
3479/** @name Raising Exceptions.
3480 *
3481 * @{
3482 */
3483
3484
3485/**
3486 * Loads the specified stack far pointer from the TSS.
3487 *
3488 * @returns VBox strict status code.
3489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3490 * @param pCtx The CPU context.
3491 * @param uCpl The CPL to load the stack for.
3492 * @param pSelSS Where to return the new stack segment.
3493 * @param puEsp Where to return the new stack pointer.
3494 */
3495IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3496 PRTSEL pSelSS, uint32_t *puEsp)
3497{
3498 VBOXSTRICTRC rcStrict;
3499 Assert(uCpl < 4);
3500
3501 switch (pCtx->tr.Attr.n.u4Type)
3502 {
3503 /*
3504 * 16-bit TSS (X86TSS16).
3505 */
3506 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3507 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3508 {
3509 uint32_t off = uCpl * 4 + 2;
3510 if (off + 4 <= pCtx->tr.u32Limit)
3511 {
3512 /** @todo check actual access pattern here. */
3513 uint32_t u32Tmp = 0; /* gcc maybe... */
3514 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3515 if (rcStrict == VINF_SUCCESS)
3516 {
3517 *puEsp = RT_LOWORD(u32Tmp);
3518 *pSelSS = RT_HIWORD(u32Tmp);
3519 return VINF_SUCCESS;
3520 }
3521 }
3522 else
3523 {
3524 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3525 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3526 }
3527 break;
3528 }
3529
3530 /*
3531 * 32-bit TSS (X86TSS32).
3532 */
3533 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3534 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3535 {
3536 uint32_t off = uCpl * 8 + 4;
3537 if (off + 7 <= pCtx->tr.u32Limit)
3538 {
3539 /** @todo check actual access pattern here. */
3540 uint64_t u64Tmp;
3541 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3542 if (rcStrict == VINF_SUCCESS)
3543 {
3544 *puEsp = u64Tmp & UINT32_MAX;
3545 *pSelSS = (RTSEL)(u64Tmp >> 32);
3546 return VINF_SUCCESS;
3547 }
3548 }
3549 else
3550 {
3551 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3552 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3553 }
3554 break;
3555 }
3556
3557 default:
3558 AssertFailed();
3559 rcStrict = VERR_IEM_IPE_4;
3560 break;
3561 }
3562
3563 *puEsp = 0; /* make gcc happy */
3564 *pSelSS = 0; /* make gcc happy */
3565 return rcStrict;
3566}
3567
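/*
 * On the offset arithmetic above: a 16-bit TSS stores the ring stacks as SP/SS
 * word pairs starting at offset 2 (sp0=2, ss0=4, sp1=6, ss1=8, ...), so
 * uCpl * 4 + 2 addresses the pair for the target CPL and a single dword read
 * yields SP in the low word and SS in the high word.  A 32-bit TSS stores
 * ESP/SS dword pairs starting at offset 4 (esp0=4, ss0=8, esp1=12, ...), so
 * uCpl * 8 + 4 addresses the pair and a single qword read yields ESP in the
 * low dword and SS in the high dword, exactly as unpacked above.
 */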
3568
3569/**
3570 * Loads the specified stack pointer from the 64-bit TSS.
3571 *
3572 * @returns VBox strict status code.
3573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3574 * @param pCtx The CPU context.
3575 * @param uCpl The CPL to load the stack for.
3576 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3577 * @param puRsp Where to return the new stack pointer.
3578 */
3579IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3580{
3581 Assert(uCpl < 4);
3582 Assert(uIst < 8);
3583 *puRsp = 0; /* make gcc happy */
3584
3585 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3586
3587 uint32_t off;
3588 if (uIst)
3589 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3590 else
3591 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3592 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3593 {
3594 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3595 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3596 }
3597
3598 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3599}
3600
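/*
 * For reference: the 64-bit TSS lays out RSP0/RSP1/RSP2 at offsets 4/12/20 and
 * IST1 thru IST7 at offsets 36 thru 84, which is what the RT_OFFSETOF based
 * calculation above relies on.  uIst == 0 selects the CPL stack, a non-zero
 * uIst selects the corresponding interrupt stack table entry.
 */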
3601
3602/**
3603 * Adjusts the CPU state according to the exception being raised.
3604 *
3605 * @param pCtx The CPU context.
3606 * @param u8Vector The exception that has been raised.
3607 */
3608DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3609{
3610 switch (u8Vector)
3611 {
3612 case X86_XCPT_DB:
3613 pCtx->dr[7] &= ~X86_DR7_GD;
3614 break;
3615 /** @todo Read the AMD and Intel exception reference... */
3616 }
3617}
3618
3619
3620/**
3621 * Implements exceptions and interrupts for real mode.
3622 *
3623 * @returns VBox strict status code.
3624 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3625 * @param pCtx The CPU context.
3626 * @param cbInstr The number of bytes to offset rIP by in the return
3627 * address.
3628 * @param u8Vector The interrupt / exception vector number.
3629 * @param fFlags The flags.
3630 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3631 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3632 */
3633IEM_STATIC VBOXSTRICTRC
3634iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3635 PCPUMCTX pCtx,
3636 uint8_t cbInstr,
3637 uint8_t u8Vector,
3638 uint32_t fFlags,
3639 uint16_t uErr,
3640 uint64_t uCr2)
3641{
3642 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3643 NOREF(uErr); NOREF(uCr2);
3644
3645 /*
3646 * Read the IDT entry.
3647 */
3648 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3649 {
3650 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3651 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3652 }
3653 RTFAR16 Idte;
3654 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3655 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3656 return rcStrict;
3657
3658 /*
3659 * Push the stack frame.
3660 */
3661 uint16_t *pu16Frame;
3662 uint64_t uNewRsp;
3663 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3664 if (rcStrict != VINF_SUCCESS)
3665 return rcStrict;
3666
3667 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3668#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3669 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3670 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3671 fEfl |= UINT16_C(0xf000);
3672#endif
3673 pu16Frame[2] = (uint16_t)fEfl;
3674 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3675 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3676 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3677 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3678 return rcStrict;
3679
3680 /*
3681 * Load the vector address into cs:ip and make exception specific state
3682 * adjustments.
3683 */
3684 pCtx->cs.Sel = Idte.sel;
3685 pCtx->cs.ValidSel = Idte.sel;
3686 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3687 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3688 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3689 pCtx->rip = Idte.off;
3690 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3691 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3692
3693 /** @todo do we actually do this in real mode? */
3694 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3695 iemRaiseXcptAdjustState(pCtx, u8Vector);
3696
3697 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3698}
3699
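/*
 * A short worked example for the real-mode dispatch above: the IVT entry for
 * vector N is a 4 byte offset:segment pair at IDTR.base + 4 * N, so INT 21h
 * with the usual IDTR base of zero reads its target from linear address 0x84.
 * The frame pushed is the 6 byte FLAGS/CS/IP triple (FLAGS at the highest
 * address), which is why iemMemStackPushBeginSpecial is asked for 6 bytes and
 * the words are stored at indices 2, 1 and 0 respectively.
 */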
3700
3701/**
3702 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3703 *
3704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3705 * @param pSReg Pointer to the segment register.
3706 */
3707IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3708{
3709 pSReg->Sel = 0;
3710 pSReg->ValidSel = 0;
3711 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3712 {
3713 /* VT-x (Intel 3960x) doesn't change the base and limit, but clears and sets the following attributes */
3714 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3715 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3716 }
3717 else
3718 {
3719 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3720 /** @todo check this on AMD-V */
3721 pSReg->u64Base = 0;
3722 pSReg->u32Limit = 0;
3723 }
3724}
3725
3726
3727/**
3728 * Loads a segment selector during a task switch in V8086 mode.
3729 *
3730 * @param pSReg Pointer to the segment register.
3731 * @param uSel The selector value to load.
3732 */
3733IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3734{
3735 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3736 pSReg->Sel = uSel;
3737 pSReg->ValidSel = uSel;
3738 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3739 pSReg->u64Base = uSel << 4;
3740 pSReg->u32Limit = 0xffff;
3741 pSReg->Attr.u = 0xf3;
3742}
3743
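/*
 * The constants above mirror how V8086 (and real-mode) segments behave: the
 * base is the selector shifted left by four, the limit is always 0xffff, and
 * the attribute byte 0xf3 decodes to P=1, DPL=3, S=1 with type 3, i.e. a
 * present, accessed, read/write data segment accessible at ring 3, matching
 * the guest segment register checks referenced in the comment above.
 */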
3744
3745/**
3746 * Loads a NULL data selector into a selector register, both the hidden and
3747 * visible parts, in protected mode.
3748 *
3749 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3750 * @param pSReg Pointer to the segment register.
3751 * @param uRpl The RPL.
3752 */
3753IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3754{
3755 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3756 * data selector in protected mode. */
3757 pSReg->Sel = uRpl;
3758 pSReg->ValidSel = uRpl;
3759 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3760 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3761 {
3762 /* VT-x (Intel 3960x) observed doing something like this. */
3763 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3764 pSReg->u32Limit = UINT32_MAX;
3765 pSReg->u64Base = 0;
3766 }
3767 else
3768 {
3769 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3770 pSReg->u32Limit = 0;
3771 pSReg->u64Base = 0;
3772 }
3773}
3774
3775
3776/**
3777 * Loads a segment selector during a task switch in protected mode.
3778 *
3779 * In this task switch scenario, we would throw \#TS exceptions rather than
3780 * \#GPs.
3781 *
3782 * @returns VBox strict status code.
3783 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3784 * @param pSReg Pointer to the segment register.
3785 * @param uSel The new selector value.
3786 *
3787 * @remarks This does _not_ handle CS or SS.
3788 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3789 */
3790IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3791{
3792 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3793
3794 /* Null data selector. */
3795 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3796 {
3797 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3798 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3799 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3800 return VINF_SUCCESS;
3801 }
3802
3803 /* Fetch the descriptor. */
3804 IEMSELDESC Desc;
3805 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3806 if (rcStrict != VINF_SUCCESS)
3807 {
3808 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3809 VBOXSTRICTRC_VAL(rcStrict)));
3810 return rcStrict;
3811 }
3812
3813 /* Must be a data segment or readable code segment. */
3814 if ( !Desc.Legacy.Gen.u1DescType
3815 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3816 {
3817 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3818 Desc.Legacy.Gen.u4Type));
3819 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3820 }
3821
3822 /* Check privileges for data segments and non-conforming code segments. */
3823 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3824 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3825 {
3826 /* The RPL and the new CPL must be less than or equal to the DPL. */
3827 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3828 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3829 {
3830 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3831 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3832 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3833 }
3834 }
3835
3836 /* Is it there? */
3837 if (!Desc.Legacy.Gen.u1Present)
3838 {
3839 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3840 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3841 }
3842
3843 /* The base and limit. */
3844 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3845 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3846
3847 /*
3848 * Ok, everything checked out fine. Now set the accessed bit before
3849 * committing the result into the registers.
3850 */
3851 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3852 {
3853 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3854 if (rcStrict != VINF_SUCCESS)
3855 return rcStrict;
3856 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3857 }
3858
3859 /* Commit */
3860 pSReg->Sel = uSel;
3861 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3862 pSReg->u32Limit = cbLimit;
3863 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3864 pSReg->ValidSel = uSel;
3865 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3866 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3867 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3868
3869 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3870 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3871 return VINF_SUCCESS;
3872}
3873
3874
3875/**
3876 * Performs a task switch.
3877 *
3878 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3879 * caller is responsible for performing the necessary checks (like DPL, TSS
3880 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3881 * reference for JMP, CALL, IRET.
3882 *
3883 * If the task switch is due to a software interrupt or hardware exception,
3884 * the caller is responsible for validating the TSS selector and descriptor. See
3885 * Intel Instruction reference for INT n.
3886 *
3887 * @returns VBox strict status code.
3888 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3889 * @param pCtx The CPU context.
3890 * @param enmTaskSwitch What caused this task switch.
3891 * @param uNextEip The EIP effective after the task switch.
3892 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3893 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3894 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3895 * @param SelTSS The TSS selector of the new task.
3896 * @param pNewDescTSS Pointer to the new TSS descriptor.
3897 */
3898IEM_STATIC VBOXSTRICTRC
3899iemTaskSwitch(PVMCPU pVCpu,
3900 PCPUMCTX pCtx,
3901 IEMTASKSWITCH enmTaskSwitch,
3902 uint32_t uNextEip,
3903 uint32_t fFlags,
3904 uint16_t uErr,
3905 uint64_t uCr2,
3906 RTSEL SelTSS,
3907 PIEMSELDESC pNewDescTSS)
3908{
3909 Assert(!IEM_IS_REAL_MODE(pVCpu));
3910 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3911
3912 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3913 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3914 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3915 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3916 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3917
3918 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3919 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3920
3921 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3922 fIsNewTSS386, pCtx->eip, uNextEip));
3923
3924 /* Update CR2 in case it's a page-fault. */
3925 /** @todo This should probably be done much earlier in IEM/PGM. See
3926 * @bugref{5653#c49}. */
3927 if (fFlags & IEM_XCPT_FLAGS_CR2)
3928 pCtx->cr2 = uCr2;
3929
3930 /*
3931 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3932 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3933 */
3934 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3935 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3936 if (uNewTSSLimit < uNewTSSLimitMin)
3937 {
3938 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3939 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3940 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3941 }
3942
3943 /*
3944 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
3945 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
3946 */
3947 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
3948 {
3949 uint32_t const uExitInfo1 = SelTSS;
3950 uint32_t uExitInfo2 = uErr;
3951 switch (enmTaskSwitch)
3952 {
3953 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
3954 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
3955 default: break;
3956 }
3957 if (fFlags & IEM_XCPT_FLAGS_ERR)
3958 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
3959 if (pCtx->eflags.Bits.u1RF)
3960 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
3961
3962 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
3963 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
3964 RT_NOREF2(uExitInfo1, uExitInfo2);
3965 }
3966 /** @todo Nested-VMX task-switch intercept. */
3967
3968 /*
3969 * Check the current TSS limit. The last written byte to the current TSS during the
3970 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3971 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3972 *
3973 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3974 * end up with smaller than "legal" TSS limits.
3975 */
3976 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3977 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3978 if (uCurTSSLimit < uCurTSSLimitMin)
3979 {
3980 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3981 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3982 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3983 }
3984
3985 /*
3986 * Verify that the new TSS can be accessed and map it. Map only the required contents
3987 * and not the entire TSS.
3988 */
3989 void *pvNewTSS;
3990 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
3991 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
3992 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
3993 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
3994 * not perform correct translation if this happens. See Intel spec. 7.2.1
3995 * "Task-State Segment" */
3996 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
3997 if (rcStrict != VINF_SUCCESS)
3998 {
3999 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4000 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4001 return rcStrict;
4002 }
4003
4004 /*
4005 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4006 */
4007 uint32_t u32EFlags = pCtx->eflags.u32;
4008 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4009 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4010 {
4011 PX86DESC pDescCurTSS;
4012 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4013 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4014 if (rcStrict != VINF_SUCCESS)
4015 {
4016 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4017 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4018 return rcStrict;
4019 }
4020
4021 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4022 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4023 if (rcStrict != VINF_SUCCESS)
4024 {
4025 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4026 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4027 return rcStrict;
4028 }
4029
4030 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4031 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4032 {
4033 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4034 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4035 u32EFlags &= ~X86_EFL_NT;
4036 }
4037 }
4038
4039 /*
4040 * Save the CPU state into the current TSS.
4041 */
4042 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
4043 if (GCPtrNewTSS == GCPtrCurTSS)
4044 {
4045 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4046 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4047 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
4048 }
4049 if (fIsNewTSS386)
4050 {
4051 /*
4052 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4053 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4054 */
4055 void *pvCurTSS32;
4056 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
4057 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
4058 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4059 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4060 if (rcStrict != VINF_SUCCESS)
4061 {
4062 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4063 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4064 return rcStrict;
4065 }
4066
4067 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4068 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4069 pCurTSS32->eip = uNextEip;
4070 pCurTSS32->eflags = u32EFlags;
4071 pCurTSS32->eax = pCtx->eax;
4072 pCurTSS32->ecx = pCtx->ecx;
4073 pCurTSS32->edx = pCtx->edx;
4074 pCurTSS32->ebx = pCtx->ebx;
4075 pCurTSS32->esp = pCtx->esp;
4076 pCurTSS32->ebp = pCtx->ebp;
4077 pCurTSS32->esi = pCtx->esi;
4078 pCurTSS32->edi = pCtx->edi;
4079 pCurTSS32->es = pCtx->es.Sel;
4080 pCurTSS32->cs = pCtx->cs.Sel;
4081 pCurTSS32->ss = pCtx->ss.Sel;
4082 pCurTSS32->ds = pCtx->ds.Sel;
4083 pCurTSS32->fs = pCtx->fs.Sel;
4084 pCurTSS32->gs = pCtx->gs.Sel;
4085
4086 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4087 if (rcStrict != VINF_SUCCESS)
4088 {
4089 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4090 VBOXSTRICTRC_VAL(rcStrict)));
4091 return rcStrict;
4092 }
4093 }
4094 else
4095 {
4096 /*
4097 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4098 */
4099 void *pvCurTSS16;
4100 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
4101 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
4102 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4103 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4104 if (rcStrict != VINF_SUCCESS)
4105 {
4106 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4107 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4108 return rcStrict;
4109 }
4110
4111 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4112 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4113 pCurTSS16->ip = uNextEip;
4114 pCurTSS16->flags = u32EFlags;
4115 pCurTSS16->ax = pCtx->ax;
4116 pCurTSS16->cx = pCtx->cx;
4117 pCurTSS16->dx = pCtx->dx;
4118 pCurTSS16->bx = pCtx->bx;
4119 pCurTSS16->sp = pCtx->sp;
4120 pCurTSS16->bp = pCtx->bp;
4121 pCurTSS16->si = pCtx->si;
4122 pCurTSS16->di = pCtx->di;
4123 pCurTSS16->es = pCtx->es.Sel;
4124 pCurTSS16->cs = pCtx->cs.Sel;
4125 pCurTSS16->ss = pCtx->ss.Sel;
4126 pCurTSS16->ds = pCtx->ds.Sel;
4127
4128 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4129 if (rcStrict != VINF_SUCCESS)
4130 {
4131 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4132 VBOXSTRICTRC_VAL(rcStrict)));
4133 return rcStrict;
4134 }
4135 }
4136
4137 /*
4138 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4139 */
4140 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4141 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4142 {
4143 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4144 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4145 pNewTSS->selPrev = pCtx->tr.Sel;
4146 }
4147
4148 /*
4149 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4150 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4151 */
4152 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4153 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4154 bool fNewDebugTrap;
4155 if (fIsNewTSS386)
4156 {
4157 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4158 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4159 uNewEip = pNewTSS32->eip;
4160 uNewEflags = pNewTSS32->eflags;
4161 uNewEax = pNewTSS32->eax;
4162 uNewEcx = pNewTSS32->ecx;
4163 uNewEdx = pNewTSS32->edx;
4164 uNewEbx = pNewTSS32->ebx;
4165 uNewEsp = pNewTSS32->esp;
4166 uNewEbp = pNewTSS32->ebp;
4167 uNewEsi = pNewTSS32->esi;
4168 uNewEdi = pNewTSS32->edi;
4169 uNewES = pNewTSS32->es;
4170 uNewCS = pNewTSS32->cs;
4171 uNewSS = pNewTSS32->ss;
4172 uNewDS = pNewTSS32->ds;
4173 uNewFS = pNewTSS32->fs;
4174 uNewGS = pNewTSS32->gs;
4175 uNewLdt = pNewTSS32->selLdt;
4176 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4177 }
4178 else
4179 {
4180 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4181 uNewCr3 = 0;
4182 uNewEip = pNewTSS16->ip;
4183 uNewEflags = pNewTSS16->flags;
4184 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4185 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4186 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4187 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4188 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4189 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4190 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4191 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4192 uNewES = pNewTSS16->es;
4193 uNewCS = pNewTSS16->cs;
4194 uNewSS = pNewTSS16->ss;
4195 uNewDS = pNewTSS16->ds;
4196 uNewFS = 0;
4197 uNewGS = 0;
4198 uNewLdt = pNewTSS16->selLdt;
4199 fNewDebugTrap = false;
4200 }
4201
4202 if (GCPtrNewTSS == GCPtrCurTSS)
4203 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4204 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4205
4206 /*
4207 * We're done accessing the new TSS.
4208 */
4209 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4210 if (rcStrict != VINF_SUCCESS)
4211 {
4212 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4213 return rcStrict;
4214 }
4215
4216 /*
4217 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4218 */
4219 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4220 {
4221 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4222 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4223 if (rcStrict != VINF_SUCCESS)
4224 {
4225 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4226 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4227 return rcStrict;
4228 }
4229
4230 /* Check that the descriptor indicates the new TSS is available (not busy). */
4231 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4232 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4233 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4234
4235 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4236 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4237 if (rcStrict != VINF_SUCCESS)
4238 {
4239 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4240 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4241 return rcStrict;
4242 }
4243 }
4244
4245 /*
4246 * From this point on, we're technically in the new task. We will defer exceptions
4247 * until the completion of the task switch but before executing any instructions in the new task.
4248 */
4249 pCtx->tr.Sel = SelTSS;
4250 pCtx->tr.ValidSel = SelTSS;
4251 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
4252 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4253 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4254 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4255 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4256
4257 /* Set the busy bit in TR. */
4258 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4259 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4260 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4261 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4262 {
4263 uNewEflags |= X86_EFL_NT;
4264 }
4265
4266 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4267 pCtx->cr0 |= X86_CR0_TS;
4268 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4269
4270 pCtx->eip = uNewEip;
4271 pCtx->eax = uNewEax;
4272 pCtx->ecx = uNewEcx;
4273 pCtx->edx = uNewEdx;
4274 pCtx->ebx = uNewEbx;
4275 pCtx->esp = uNewEsp;
4276 pCtx->ebp = uNewEbp;
4277 pCtx->esi = uNewEsi;
4278 pCtx->edi = uNewEdi;
4279
4280 uNewEflags &= X86_EFL_LIVE_MASK;
4281 uNewEflags |= X86_EFL_RA1_MASK;
4282 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
4283
4284 /*
4285 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4286 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4287 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4288 */
4289 pCtx->es.Sel = uNewES;
4290 pCtx->es.Attr.u &= ~X86DESCATTR_P;
4291
4292 pCtx->cs.Sel = uNewCS;
4293 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
4294
4295 pCtx->ss.Sel = uNewSS;
4296 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
4297
4298 pCtx->ds.Sel = uNewDS;
4299 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
4300
4301 pCtx->fs.Sel = uNewFS;
4302 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
4303
4304 pCtx->gs.Sel = uNewGS;
4305 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
4306 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4307
4308 pCtx->ldtr.Sel = uNewLdt;
4309 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4310 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
4311 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4312
4313 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4314 {
4315 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
4316 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
4317 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
4318 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
4319 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
4320 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
4321 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4322 }
4323
4324 /*
4325 * Switch CR3 for the new task.
4326 */
4327 if ( fIsNewTSS386
4328 && (pCtx->cr0 & X86_CR0_PG))
4329 {
4330 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4331 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4332 {
4333 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4334 AssertRCSuccessReturn(rc, rc);
4335 }
4336 else
4337 pCtx->cr3 = uNewCr3;
4338
4339 /* Inform PGM. */
4340 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4341 {
4342 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4343 AssertRCReturn(rc, rc);
4344 /* ignore informational status codes */
4345 }
4346 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4347 }
4348
4349 /*
4350 * Switch LDTR for the new task.
4351 */
4352 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4353 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4354 else
4355 {
4356 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4357
4358 IEMSELDESC DescNewLdt;
4359 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4360 if (rcStrict != VINF_SUCCESS)
4361 {
4362 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4363 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4364 return rcStrict;
4365 }
4366 if ( !DescNewLdt.Legacy.Gen.u1Present
4367 || DescNewLdt.Legacy.Gen.u1DescType
4368 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4369 {
4370 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4371 uNewLdt, DescNewLdt.Legacy.u));
4372 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4373 }
4374
4375 pCtx->ldtr.ValidSel = uNewLdt;
4376 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4377 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4378 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4379 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4380 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4381 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4382 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4383 }
4384
4385 IEMSELDESC DescSS;
4386 if (IEM_IS_V86_MODE(pVCpu))
4387 {
4388 pVCpu->iem.s.uCpl = 3;
4389 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4390 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4391 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4392 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4393 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4394 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4395
4396 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4397 DescSS.Legacy.u = 0;
4398 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4399 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4400 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4401 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4402 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4403 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4404 DescSS.Legacy.Gen.u2Dpl = 3;
4405 }
4406 else
4407 {
4408 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4409
4410 /*
4411 * Load the stack segment for the new task.
4412 */
4413 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4414 {
4415 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4416 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4417 }
4418
4419 /* Fetch the descriptor. */
4420 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4421 if (rcStrict != VINF_SUCCESS)
4422 {
4423 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4424 VBOXSTRICTRC_VAL(rcStrict)));
4425 return rcStrict;
4426 }
4427
4428 /* SS must be a data segment and writable. */
4429 if ( !DescSS.Legacy.Gen.u1DescType
4430 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4431 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4432 {
4433 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4434 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4435 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4436 }
4437
4438 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4439 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4440 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4441 {
4442 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4443 uNewCpl));
4444 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4445 }
4446
4447 /* Is it there? */
4448 if (!DescSS.Legacy.Gen.u1Present)
4449 {
4450 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4451 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4452 }
4453
4454 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4455 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4456
4457 /* Set the accessed bit before committing the result into SS. */
4458 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4459 {
4460 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4461 if (rcStrict != VINF_SUCCESS)
4462 return rcStrict;
4463 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4464 }
4465
4466 /* Commit SS. */
4467 pCtx->ss.Sel = uNewSS;
4468 pCtx->ss.ValidSel = uNewSS;
4469 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4470 pCtx->ss.u32Limit = cbLimit;
4471 pCtx->ss.u64Base = u64Base;
4472 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4473 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4474
4475 /* CPL has changed, update IEM before loading rest of segments. */
4476 pVCpu->iem.s.uCpl = uNewCpl;
4477
4478 /*
4479 * Load the data segments for the new task.
4480 */
4481 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4482 if (rcStrict != VINF_SUCCESS)
4483 return rcStrict;
4484 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4485 if (rcStrict != VINF_SUCCESS)
4486 return rcStrict;
4487 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4488 if (rcStrict != VINF_SUCCESS)
4489 return rcStrict;
4490 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4491 if (rcStrict != VINF_SUCCESS)
4492 return rcStrict;
4493
4494 /*
4495 * Load the code segment for the new task.
4496 */
4497 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4498 {
4499 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4500 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4501 }
4502
4503 /* Fetch the descriptor. */
4504 IEMSELDESC DescCS;
4505 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4506 if (rcStrict != VINF_SUCCESS)
4507 {
4508 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4509 return rcStrict;
4510 }
4511
4512 /* CS must be a code segment. */
4513 if ( !DescCS.Legacy.Gen.u1DescType
4514 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4515 {
4516 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4517 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4518 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4519 }
4520
4521 /* For conforming CS, DPL must be less than or equal to the RPL. */
4522 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4523 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4524 {
4525 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4526 DescCS.Legacy.Gen.u2Dpl));
4527 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4528 }
4529
4530 /* For non-conforming CS, DPL must match RPL. */
4531 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4532 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4533 {
4534 Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4535 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4536 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4537 }
4538
4539 /* Is it there? */
4540 if (!DescCS.Legacy.Gen.u1Present)
4541 {
4542 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4543 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4544 }
4545
4546 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4547 u64Base = X86DESC_BASE(&DescCS.Legacy);
4548
4549 /* Set the accessed bit before committing the result into CS. */
4550 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4551 {
4552 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4553 if (rcStrict != VINF_SUCCESS)
4554 return rcStrict;
4555 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4556 }
4557
4558 /* Commit CS. */
4559 pCtx->cs.Sel = uNewCS;
4560 pCtx->cs.ValidSel = uNewCS;
4561 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4562 pCtx->cs.u32Limit = cbLimit;
4563 pCtx->cs.u64Base = u64Base;
4564 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4565 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4566 }
4567
4568 /** @todo Debug trap. */
4569 if (fIsNewTSS386 && fNewDebugTrap)
4570 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4571
4572 /*
4573 * Construct the error code masks based on what caused this task switch.
4574 * See Intel Instruction reference for INT.
4575 */
4576 uint16_t uExt;
4577 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4578 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4579 {
4580 uExt = 1;
4581 }
4582 else
4583 uExt = 0;
4584
4585 /*
4586 * Push any error code on to the new stack.
4587 */
4588 if (fFlags & IEM_XCPT_FLAGS_ERR)
4589 {
4590 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4591 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4592 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4593
4594 /* Check that there is sufficient space on the stack. */
4595 /** @todo Factor out segment limit checking for normal/expand down segments
4596 * into a separate function. */
4597 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4598 {
4599 if ( pCtx->esp - 1 > cbLimitSS
4600 || pCtx->esp < cbStackFrame)
4601 {
4602 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4603 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4604 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4605 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4606 }
4607 }
4608 else
4609 {
4610 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4611 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4612 {
4613 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4614 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4615 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4616 }
4617 }
4618
4619
4620 if (fIsNewTSS386)
4621 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4622 else
4623 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4624 if (rcStrict != VINF_SUCCESS)
4625 {
4626 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4627 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4628 return rcStrict;
4629 }
4630 }
4631
4632 /* Check the new EIP against the new CS limit. */
4633 if (pCtx->eip > pCtx->cs.u32Limit)
4634 {
4635 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4636 pCtx->eip, pCtx->cs.u32Limit));
4637 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4638 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4639 }
4640
4641 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4642 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4643}
4644
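/*
 * Recap of the task switch sequence implemented above, which may also help when
 * reading the protected-mode dispatch code that follows: (1) limit check the new
 * and the current TSS, (2) honour the SVM task-switch intercept, (3) clear the
 * busy bit of the outgoing TSS descriptor for JMP/IRET, (4) save the dynamic
 * register state into the outgoing TSS, (5) read the incoming TSS and link it
 * back for CALL/INT_XCPT, (6) set the busy bit of the incoming TSS descriptor
 * unless this is an IRET, (7) load TR, EFLAGS (with NT for CALL/INT_XCPT),
 * CR0.TS, CR3, LDTR and the segment registers with their protection checks, and
 * (8) push any error code on the new stack and check EIP against the CS limit.
 */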
4645
4646/**
4647 * Implements exceptions and interrupts for protected mode.
4648 *
4649 * @returns VBox strict status code.
4650 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4651 * @param pCtx The CPU context.
4652 * @param cbInstr The number of bytes to offset rIP by in the return
4653 * address.
4654 * @param u8Vector The interrupt / exception vector number.
4655 * @param fFlags The flags.
4656 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4657 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4658 */
4659IEM_STATIC VBOXSTRICTRC
4660iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4661 PCPUMCTX pCtx,
4662 uint8_t cbInstr,
4663 uint8_t u8Vector,
4664 uint32_t fFlags,
4665 uint16_t uErr,
4666 uint64_t uCr2)
4667{
4668 /*
4669 * Read the IDT entry.
4670 */
4671 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4672 {
4673 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4674 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4675 }
4676 X86DESC Idte;
4677 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4678 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4679 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4680 return rcStrict;
4681 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4682 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4683 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4684
4685 /*
4686 * Check the descriptor type, DPL and such.
4687 * ASSUMES this is done in the same order as described for call-gate calls.
4688 */
4689 if (Idte.Gate.u1DescType)
4690 {
4691 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4692 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4693 }
4694 bool fTaskGate = false;
4695 uint8_t f32BitGate = true;
4696 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4697 switch (Idte.Gate.u4Type)
4698 {
4699 case X86_SEL_TYPE_SYS_UNDEFINED:
4700 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4701 case X86_SEL_TYPE_SYS_LDT:
4702 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4703 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4704 case X86_SEL_TYPE_SYS_UNDEFINED2:
4705 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4706 case X86_SEL_TYPE_SYS_UNDEFINED3:
4707 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4708 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4709 case X86_SEL_TYPE_SYS_UNDEFINED4:
4710 {
4711 /** @todo check what actually happens when the type is wrong...
4712 * esp. call gates. */
4713 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4714 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4715 }
4716
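        /* Interrupt gates clear EFLAGS.IF on entry, trap gates leave it unchanged. */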
4717 case X86_SEL_TYPE_SYS_286_INT_GATE:
4718 f32BitGate = false;
4719 RT_FALL_THRU();
4720 case X86_SEL_TYPE_SYS_386_INT_GATE:
4721 fEflToClear |= X86_EFL_IF;
4722 break;
4723
4724 case X86_SEL_TYPE_SYS_TASK_GATE:
4725 fTaskGate = true;
4726#ifndef IEM_IMPLEMENTS_TASKSWITCH
4727 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4728#endif
4729 break;
4730
4731 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4732             f32BitGate = false;
                 RT_FALL_THRU();
4733 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4734 break;
4735
4736 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4737 }
4738
4739 /* Check DPL against CPL if applicable. */
4740 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4741 {
4742 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4743 {
4744 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4745 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4746 }
4747 }
4748
4749 /* Is it there? */
4750 if (!Idte.Gate.u1Present)
4751 {
4752 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4753 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4754 }
4755
4756 /* Is it a task-gate? */
4757 if (fTaskGate)
4758 {
4759 /*
4760 * Construct the error code masks based on what caused this task switch.
4761 * See Intel Instruction reference for INT.
4762 */
4763 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4764 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4765 RTSEL SelTSS = Idte.Gate.u16Sel;
4766
4767 /*
4768 * Fetch the TSS descriptor in the GDT.
4769 */
4770 IEMSELDESC DescTSS;
4771 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4772 if (rcStrict != VINF_SUCCESS)
4773 {
4774 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4775 VBOXSTRICTRC_VAL(rcStrict)));
4776 return rcStrict;
4777 }
4778
4779 /* The TSS descriptor must be a system segment and be available (not busy). */
4780 if ( DescTSS.Legacy.Gen.u1DescType
4781 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4782 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4783 {
4784 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4785 u8Vector, SelTSS, DescTSS.Legacy.au64));
4786 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4787 }
4788
4789 /* The TSS must be present. */
4790 if (!DescTSS.Legacy.Gen.u1Present)
4791 {
4792 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4793 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4794 }
4795
4796 /* Do the actual task switch. */
4797 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4798 }
4799
4800 /* A null CS is bad. */
4801 RTSEL NewCS = Idte.Gate.u16Sel;
4802 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4803 {
4804 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4805 return iemRaiseGeneralProtectionFault0(pVCpu);
4806 }
4807
4808 /* Fetch the descriptor for the new CS. */
4809 IEMSELDESC DescCS;
4810 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4811 if (rcStrict != VINF_SUCCESS)
4812 {
4813 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4814 return rcStrict;
4815 }
4816
4817 /* Must be a code segment. */
4818 if (!DescCS.Legacy.Gen.u1DescType)
4819 {
4820 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4821 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4822 }
4823 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4824 {
4825 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4826 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4827 }
4828
4829 /* Don't allow lowering the privilege level. */
4830 /** @todo Does the lowering of privileges apply to software interrupts
4831 * only? This has bearings on the more-privileged or
4832 * same-privilege stack behavior further down. A testcase would
4833 * be nice. */
4834 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4835 {
4836 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4837 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4838 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4839 }
4840
4841 /* Make sure the selector is present. */
4842 if (!DescCS.Legacy.Gen.u1Present)
4843 {
4844 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4845 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4846 }
4847
4848 /* Check the new EIP against the new CS limit. */
4849 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4850 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4851 ? Idte.Gate.u16OffsetLow
4852 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4853 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4854 if (uNewEip > cbLimitCS)
4855 {
4856 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4857 u8Vector, uNewEip, cbLimitCS, NewCS));
4858 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4859 }
4860 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4861
4862 /* Calc the flag image to push. */
4863 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4864 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4865 fEfl &= ~X86_EFL_RF;
4866 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4867 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4868
4869 /* From V8086 mode only go to CPL 0. */
4870 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4871 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4872 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4873 {
4874 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4875 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4876 }
4877
4878 /*
4879 * If the privilege level changes, we need to get a new stack from the TSS.
4880      * This in turn means validating the new SS and ESP...
4881 */
4882 if (uNewCpl != pVCpu->iem.s.uCpl)
4883 {
4884 RTSEL NewSS;
4885 uint32_t uNewEsp;
4886 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4887 if (rcStrict != VINF_SUCCESS)
4888 return rcStrict;
4889
4890 IEMSELDESC DescSS;
4891 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4892 if (rcStrict != VINF_SUCCESS)
4893 return rcStrict;
4894 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4895 if (!DescSS.Legacy.Gen.u1DefBig)
4896 {
4897 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4898 uNewEsp = (uint16_t)uNewEsp;
4899 }
4900
4901 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4902
4903 /* Check that there is sufficient space for the stack frame. */
4904 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
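        /* The frame holds EIP, CS, EFLAGS, the old ESP and the old SS (5 entries),
           plus ES, DS, FS and GS when interrupting V8086 code, plus the error code
           when applicable; entries are 2 bytes wide for 16-bit gates and 4 bytes
           wide for 32-bit gates. */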
4905 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4906 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4907 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4908
4909 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4910 {
4911 if ( uNewEsp - 1 > cbLimitSS
4912 || uNewEsp < cbStackFrame)
4913 {
4914 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4915 u8Vector, NewSS, uNewEsp, cbStackFrame));
4916 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4917 }
4918 }
4919 else
4920 {
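                /* Expand-down segment: the valid offsets lie above the limit, up to
                   0xffff or 0xffffffff depending on the D/B bit, so the whole frame
                   must land in that range. */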
4921 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4922 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4923 {
4924 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4925 u8Vector, NewSS, uNewEsp, cbStackFrame));
4926 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4927 }
4928 }
4929
4930 /*
4931 * Start making changes.
4932 */
4933
4934 /* Set the new CPL so that stack accesses use it. */
4935 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4936 pVCpu->iem.s.uCpl = uNewCpl;
4937
4938 /* Create the stack frame. */
4939 RTPTRUNION uStackFrame;
4940 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4941 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4942 if (rcStrict != VINF_SUCCESS)
4943 return rcStrict;
4944 void * const pvStackFrame = uStackFrame.pv;
4945 if (f32BitGate)
4946 {
4947 if (fFlags & IEM_XCPT_FLAGS_ERR)
4948 *uStackFrame.pu32++ = uErr;
4949 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4950 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4951 uStackFrame.pu32[2] = fEfl;
4952 uStackFrame.pu32[3] = pCtx->esp;
4953 uStackFrame.pu32[4] = pCtx->ss.Sel;
4954 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
4955 if (fEfl & X86_EFL_VM)
4956 {
4957 uStackFrame.pu32[1] = pCtx->cs.Sel;
4958 uStackFrame.pu32[5] = pCtx->es.Sel;
4959 uStackFrame.pu32[6] = pCtx->ds.Sel;
4960 uStackFrame.pu32[7] = pCtx->fs.Sel;
4961 uStackFrame.pu32[8] = pCtx->gs.Sel;
4962 }
4963 }
4964 else
4965 {
4966 if (fFlags & IEM_XCPT_FLAGS_ERR)
4967 *uStackFrame.pu16++ = uErr;
4968 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4969 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4970 uStackFrame.pu16[2] = fEfl;
4971 uStackFrame.pu16[3] = pCtx->sp;
4972 uStackFrame.pu16[4] = pCtx->ss.Sel;
4973 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
4974 if (fEfl & X86_EFL_VM)
4975 {
4976 uStackFrame.pu16[1] = pCtx->cs.Sel;
4977 uStackFrame.pu16[5] = pCtx->es.Sel;
4978 uStackFrame.pu16[6] = pCtx->ds.Sel;
4979 uStackFrame.pu16[7] = pCtx->fs.Sel;
4980 uStackFrame.pu16[8] = pCtx->gs.Sel;
4981 }
4982 }
4983 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4984 if (rcStrict != VINF_SUCCESS)
4985 return rcStrict;
4986
4987 /* Mark the selectors 'accessed' (hope this is the correct time). */
4988     /** @todo testcase: exactly _when_ are the accessed bits set - before or
4989 * after pushing the stack frame? (Write protect the gdt + stack to
4990 * find out.) */
4991 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4992 {
4993 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4994 if (rcStrict != VINF_SUCCESS)
4995 return rcStrict;
4996 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4997 }
4998
4999 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5000 {
5001 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5002 if (rcStrict != VINF_SUCCESS)
5003 return rcStrict;
5004 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5005 }
5006
5007 /*
5008      * Start committing the register changes (joins with the DPL=CPL branch).
5009 */
5010 pCtx->ss.Sel = NewSS;
5011 pCtx->ss.ValidSel = NewSS;
5012 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5013 pCtx->ss.u32Limit = cbLimitSS;
5014 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5015 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5016 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5017 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5018 * SP is loaded).
5019 * Need to check the other combinations too:
5020 * - 16-bit TSS, 32-bit handler
5021 * - 32-bit TSS, 16-bit handler */
5022 if (!pCtx->ss.Attr.n.u1DefBig)
5023 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
5024 else
5025 pCtx->rsp = uNewEsp - cbStackFrame;
5026
5027 if (fEfl & X86_EFL_VM)
5028 {
5029 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
5030 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
5031 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
5032 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
5033 }
5034 }
5035 /*
5036 * Same privilege, no stack change and smaller stack frame.
5037 */
5038 else
5039 {
5040 uint64_t uNewRsp;
5041 RTPTRUNION uStackFrame;
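        /* The frame holds EIP, CS and EFLAGS (3 entries), plus the error code when
           applicable; entries are 2 bytes wide for 16-bit gates and 4 bytes wide
           for 32-bit gates. */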
5042 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
5043 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5044 if (rcStrict != VINF_SUCCESS)
5045 return rcStrict;
5046 void * const pvStackFrame = uStackFrame.pv;
5047
5048 if (f32BitGate)
5049 {
5050 if (fFlags & IEM_XCPT_FLAGS_ERR)
5051 *uStackFrame.pu32++ = uErr;
5052 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5053 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5054 uStackFrame.pu32[2] = fEfl;
5055 }
5056 else
5057 {
5058 if (fFlags & IEM_XCPT_FLAGS_ERR)
5059 *uStackFrame.pu16++ = uErr;
5060 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5061 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5062 uStackFrame.pu16[2] = fEfl;
5063 }
5064 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5065 if (rcStrict != VINF_SUCCESS)
5066 return rcStrict;
5067
5068 /* Mark the CS selector as 'accessed'. */
5069 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5070 {
5071 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5072 if (rcStrict != VINF_SUCCESS)
5073 return rcStrict;
5074 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5075 }
5076
5077 /*
5078 * Start committing the register changes (joins with the other branch).
5079 */
5080 pCtx->rsp = uNewRsp;
5081 }
5082
5083 /* ... register committing continues. */
5084 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5085 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5086 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5087 pCtx->cs.u32Limit = cbLimitCS;
5088 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5089 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5090
5091 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5092 fEfl &= ~fEflToClear;
5093 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5094
5095 if (fFlags & IEM_XCPT_FLAGS_CR2)
5096 pCtx->cr2 = uCr2;
5097
5098 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5099 iemRaiseXcptAdjustState(pCtx, u8Vector);
5100
5101 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5102}
5103
5104
5105/**
5106 * Implements exceptions and interrupts for long mode.
5107 *
5108 * @returns VBox strict status code.
5109 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5110 * @param pCtx The CPU context.
5111 * @param cbInstr The number of bytes to offset rIP by in the return
5112 * address.
5113 * @param u8Vector The interrupt / exception vector number.
5114 * @param fFlags The flags.
5115 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5116 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5117 */
5118IEM_STATIC VBOXSTRICTRC
5119iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5120 PCPUMCTX pCtx,
5121 uint8_t cbInstr,
5122 uint8_t u8Vector,
5123 uint32_t fFlags,
5124 uint16_t uErr,
5125 uint64_t uCr2)
5126{
5127 /*
5128 * Read the IDT entry.
5129 */
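    /* Long-mode IDT entries are 16 bytes each, so the table offset is the vector number times 16. */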
5130 uint16_t offIdt = (uint16_t)u8Vector << 4;
5131 if (pCtx->idtr.cbIdt < offIdt + 7)
5132 {
5133 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
5134 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5135 }
5136 X86DESC64 Idte;
5137 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
5138 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5139 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
5140 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5141 return rcStrict;
5142 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5143 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5144 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5145
5146 /*
5147 * Check the descriptor type, DPL and such.
5148 * ASSUMES this is done in the same order as described for call-gate calls.
5149 */
5150 if (Idte.Gate.u1DescType)
5151 {
5152 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5153 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5154 }
5155 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5156 switch (Idte.Gate.u4Type)
5157 {
5158 case AMD64_SEL_TYPE_SYS_INT_GATE:
5159 fEflToClear |= X86_EFL_IF;
5160 break;
5161 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5162 break;
5163
5164 default:
5165 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5166 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5167 }
5168
5169 /* Check DPL against CPL if applicable. */
5170 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5171 {
5172 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5173 {
5174 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5175 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5176 }
5177 }
5178
5179 /* Is it there? */
5180 if (!Idte.Gate.u1Present)
5181 {
5182 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5183 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5184 }
5185
5186 /* A null CS is bad. */
5187 RTSEL NewCS = Idte.Gate.u16Sel;
5188 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5189 {
5190 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5191 return iemRaiseGeneralProtectionFault0(pVCpu);
5192 }
5193
5194 /* Fetch the descriptor for the new CS. */
5195 IEMSELDESC DescCS;
5196 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5197 if (rcStrict != VINF_SUCCESS)
5198 {
5199 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5200 return rcStrict;
5201 }
5202
5203 /* Must be a 64-bit code segment. */
5204 if (!DescCS.Long.Gen.u1DescType)
5205 {
5206 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5207 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5208 }
5209 if ( !DescCS.Long.Gen.u1Long
5210 || DescCS.Long.Gen.u1DefBig
5211 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5212 {
5213 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5214 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5215 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5216 }
5217
5218 /* Don't allow lowering the privilege level. For non-conforming CS
5219 selectors, the CS.DPL sets the privilege level the trap/interrupt
5220 handler runs at. For conforming CS selectors, the CPL remains
5221 unchanged, but the CS.DPL must be <= CPL. */
5222 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5223 * when CPU in Ring-0. Result \#GP? */
5224 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5225 {
5226 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5227 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5228 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5229 }
5230
5231
5232 /* Make sure the selector is present. */
5233 if (!DescCS.Legacy.Gen.u1Present)
5234 {
5235 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5236 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5237 }
5238
5239 /* Check that the new RIP is canonical. */
5240 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5241 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5242 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5243 if (!IEM_IS_CANONICAL(uNewRip))
5244 {
5245 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5246 return iemRaiseGeneralProtectionFault0(pVCpu);
5247 }
5248
5249 /*
5250 * If the privilege level changes or if the IST isn't zero, we need to get
5251 * a new stack from the TSS.
5252 */
5253 uint64_t uNewRsp;
5254 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5255 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5256 if ( uNewCpl != pVCpu->iem.s.uCpl
5257 || Idte.Gate.u3IST != 0)
5258 {
5259 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5260 if (rcStrict != VINF_SUCCESS)
5261 return rcStrict;
5262 }
5263 else
5264 uNewRsp = pCtx->rsp;
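    /* In 64-bit mode the CPU aligns the stack on a 16-byte boundary before pushing
       the interrupt/exception frame. */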
5265 uNewRsp &= ~(uint64_t)0xf;
5266
5267 /*
5268 * Calc the flag image to push.
5269 */
5270 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
5271 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5272 fEfl &= ~X86_EFL_RF;
5273 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
5274 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5275
5276 /*
5277 * Start making changes.
5278 */
5279 /* Set the new CPL so that stack accesses use it. */
5280 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5281 pVCpu->iem.s.uCpl = uNewCpl;
5282
5283 /* Create the stack frame. */
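    /* Five qwords: RIP, CS, RFLAGS, the old RSP and the old SS, plus a sixth qword
       for the error code when applicable. */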
5284 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
5285 RTPTRUNION uStackFrame;
5286 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5287 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5288 if (rcStrict != VINF_SUCCESS)
5289 return rcStrict;
5290 void * const pvStackFrame = uStackFrame.pv;
5291
5292 if (fFlags & IEM_XCPT_FLAGS_ERR)
5293 *uStackFrame.pu64++ = uErr;
5294 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
5295 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5296 uStackFrame.pu64[2] = fEfl;
5297 uStackFrame.pu64[3] = pCtx->rsp;
5298 uStackFrame.pu64[4] = pCtx->ss.Sel;
5299 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5300 if (rcStrict != VINF_SUCCESS)
5301 return rcStrict;
5302
5303     /* Mark the CS selector 'accessed' (hope this is the correct time). */
5304     /** @todo testcase: exactly _when_ are the accessed bits set - before or
5305 * after pushing the stack frame? (Write protect the gdt + stack to
5306 * find out.) */
5307 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5308 {
5309 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5310 if (rcStrict != VINF_SUCCESS)
5311 return rcStrict;
5312 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5313 }
5314
5315 /*
5316      * Start committing the register changes.
5317 */
5318     /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
5319 * hidden registers when interrupting 32-bit or 16-bit code! */
5320 if (uNewCpl != uOldCpl)
5321 {
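        /* A CPL change in 64-bit mode loads SS with a NULL selector whose RPL is the new CPL. */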
5322 pCtx->ss.Sel = 0 | uNewCpl;
5323 pCtx->ss.ValidSel = 0 | uNewCpl;
5324 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5325 pCtx->ss.u32Limit = UINT32_MAX;
5326 pCtx->ss.u64Base = 0;
5327 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5328 }
5329 pCtx->rsp = uNewRsp - cbStackFrame;
5330 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5331 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5332 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5333 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5334 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5335 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5336 pCtx->rip = uNewRip;
5337
5338 fEfl &= ~fEflToClear;
5339 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5340
5341 if (fFlags & IEM_XCPT_FLAGS_CR2)
5342 pCtx->cr2 = uCr2;
5343
5344 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5345 iemRaiseXcptAdjustState(pCtx, u8Vector);
5346
5347 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5348}
5349
5350
5351/**
5352 * Implements exceptions and interrupts.
5353 *
5354  * All exceptions and interrupts go through this function!
5355 *
5356 * @returns VBox strict status code.
5357 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5358 * @param cbInstr The number of bytes to offset rIP by in the return
5359 * address.
5360 * @param u8Vector The interrupt / exception vector number.
5361 * @param fFlags The flags.
5362 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5363 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5364 */
5365DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5366iemRaiseXcptOrInt(PVMCPU pVCpu,
5367 uint8_t cbInstr,
5368 uint8_t u8Vector,
5369 uint32_t fFlags,
5370 uint16_t uErr,
5371 uint64_t uCr2)
5372{
5373 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5374#ifdef IN_RING0
5375 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5376 AssertRCReturn(rc, rc);
5377#endif
5378
5379#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5380 /*
5381 * Flush prefetch buffer
5382 */
5383 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5384#endif
5385
5386 /*
5387 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5388 */
5389 if ( pCtx->eflags.Bits.u1VM
5390 && pCtx->eflags.Bits.u2IOPL != 3
5391 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5392 && (pCtx->cr0 & X86_CR0_PE) )
5393 {
5394 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5395 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5396 u8Vector = X86_XCPT_GP;
5397 uErr = 0;
5398 }
5399#ifdef DBGFTRACE_ENABLED
5400 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5401 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5402 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5403#endif
5404
5405#ifdef VBOX_WITH_NESTED_HWVIRT
5406 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
5407 {
5408 /*
5409 * If the event is being injected as part of VMRUN, it isn't subject to event
5410 * intercepts in the nested-guest. However, secondary exceptions that occur
5411 * during injection of any event -are- subject to exception intercepts.
5412 * See AMD spec. 15.20 "Event Injection".
5413 */
5414 if (!pCtx->hwvirt.svm.fInterceptEvents)
5415 pCtx->hwvirt.svm.fInterceptEvents = 1;
5416 else
5417 {
5418 /*
5419 * Check and handle if the event being raised is intercepted.
5420 */
5421 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, pCtx, u8Vector, fFlags, uErr, uCr2);
5422 if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
5423 return rcStrict0;
5424 }
5425 }
5426#endif /* VBOX_WITH_NESTED_HWVIRT */
5427
5428 /*
5429 * Do recursion accounting.
5430 */
5431 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5432 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5433 if (pVCpu->iem.s.cXcptRecursions == 0)
5434 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5435 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5436 else
5437 {
5438 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5439 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5440 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5441
5442 if (pVCpu->iem.s.cXcptRecursions >= 3)
5443 {
5444#ifdef DEBUG_bird
5445 AssertFailed();
5446#endif
5447 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5448 }
5449
5450 /*
5451 * Evaluate the sequence of recurring events.
5452 */
5453 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5454 NULL /* pXcptRaiseInfo */);
5455 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5456 { /* likely */ }
5457 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5458 {
5459 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5460 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5461 u8Vector = X86_XCPT_DF;
5462 uErr = 0;
5463 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5464 if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5465 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + X86_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5466 }
5467 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5468 {
5469 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5470 return iemInitiateCpuShutdown(pVCpu);
5471 }
5472 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5473 {
5474 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5475 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5476 if (!CPUMIsGuestInNestedHwVirtMode(pCtx))
5477 return VERR_EM_GUEST_CPU_HANG;
5478 }
5479 else
5480 {
5481 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5482 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5483 return VERR_IEM_IPE_9;
5484 }
5485
5486 /*
5487          * The 'EXT' bit is set when an exception occurs during delivery of an external
5488          * event (such as an interrupt or an earlier exception)[1]. The privileged software
5489          * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5490          * interrupts and the INTO and INT3 instructions, the 'EXT' bit will not be set[3].
5491 *
5492 * [1] - Intel spec. 6.13 "Error Code"
5493 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5494 * [3] - Intel Instruction reference for INT n.
5495 */
5496 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5497 && (fFlags & IEM_XCPT_FLAGS_ERR)
5498 && u8Vector != X86_XCPT_PF
5499 && u8Vector != X86_XCPT_DF)
5500 {
5501 uErr |= X86_TRAP_ERR_EXTERNAL;
5502 }
5503 }
5504
5505 pVCpu->iem.s.cXcptRecursions++;
5506 pVCpu->iem.s.uCurXcpt = u8Vector;
5507 pVCpu->iem.s.fCurXcpt = fFlags;
5508 pVCpu->iem.s.uCurXcptErr = uErr;
5509 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5510
5511 /*
5512 * Extensive logging.
5513 */
5514#if defined(LOG_ENABLED) && defined(IN_RING3)
5515 if (LogIs3Enabled())
5516 {
5517 PVM pVM = pVCpu->CTX_SUFF(pVM);
5518 char szRegs[4096];
5519 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5520 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5521 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5522 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5523 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5524 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5525 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5526 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5527 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5528 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5529 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5530 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5531 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5532 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5533 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5534 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5535 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5536 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5537 " efer=%016VR{efer}\n"
5538 " pat=%016VR{pat}\n"
5539 " sf_mask=%016VR{sf_mask}\n"
5540 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5541 " lstar=%016VR{lstar}\n"
5542 " star=%016VR{star} cstar=%016VR{cstar}\n"
5543 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5544 );
5545
5546 char szInstr[256];
5547 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5548 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5549 szInstr, sizeof(szInstr), NULL);
5550 Log3(("%s%s\n", szRegs, szInstr));
5551 }
5552#endif /* LOG_ENABLED */
5553
5554 /*
5555 * Call the mode specific worker function.
5556 */
5557 VBOXSTRICTRC rcStrict;
5558 if (!(pCtx->cr0 & X86_CR0_PE))
5559 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5560 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5561 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5562 else
5563 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5564
5565 /* Flush the prefetch buffer. */
5566#ifdef IEM_WITH_CODE_TLB
5567 pVCpu->iem.s.pbInstrBuf = NULL;
5568#else
5569 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5570#endif
5571
5572 /*
5573 * Unwind.
5574 */
5575 pVCpu->iem.s.cXcptRecursions--;
5576 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5577 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5578 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5579 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5580 return rcStrict;
5581}
5582
5583#ifdef IEM_WITH_SETJMP
5584/**
5585 * See iemRaiseXcptOrInt. Will not return.
5586 */
5587IEM_STATIC DECL_NO_RETURN(void)
5588iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5589 uint8_t cbInstr,
5590 uint8_t u8Vector,
5591 uint32_t fFlags,
5592 uint16_t uErr,
5593 uint64_t uCr2)
5594{
5595 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5596 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5597}
5598#endif
5599
5600
5601/** \#DE - 00. */
5602DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5603{
5604 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5605}
5606
5607
5608/** \#DB - 01.
5609  * @note This automatically clears DR7.GD. */
5610DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5611{
5612 /** @todo set/clear RF. */
5613 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5614 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5615}
5616
5617
5618/** \#BR - 05. */
5619DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5620{
5621 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5622}
5623
5624
5625/** \#UD - 06. */
5626DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5627{
5628 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5629}
5630
5631
5632/** \#NM - 07. */
5633DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5634{
5635 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5636}
5637
5638
5639/** \#TS(err) - 0a. */
5640DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5641{
5642 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5643}
5644
5645
5646/** \#TS(tr) - 0a. */
5647DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5648{
5649 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5650 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5651}
5652
5653
5654/** \#TS(0) - 0a. */
5655DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5656{
5657 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5658 0, 0);
5659}
5660
5661
5662/** \#TS(err) - 0a. */
5663DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5664{
5665 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5666 uSel & X86_SEL_MASK_OFF_RPL, 0);
5667}
5668
5669
5670/** \#NP(err) - 0b. */
5671DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5672{
5673 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5674}
5675
5676
5677/** \#NP(sel) - 0b. */
5678DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5679{
5680 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5681 uSel & ~X86_SEL_RPL, 0);
5682}
5683
5684
5685/** \#SS(seg) - 0c. */
5686DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5687{
5688 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5689 uSel & ~X86_SEL_RPL, 0);
5690}
5691
5692
5693/** \#SS(err) - 0c. */
5694DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5695{
5696 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5697}
5698
5699
5700/** \#GP(n) - 0d. */
5701DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5702{
5703 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5704}
5705
5706
5707/** \#GP(0) - 0d. */
5708DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5709{
5710 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5711}
5712
5713#ifdef IEM_WITH_SETJMP
5714/** \#GP(0) - 0d. */
5715DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5716{
5717 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5718}
5719#endif
5720
5721
5722/** \#GP(sel) - 0d. */
5723DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5724{
5725 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5726 Sel & ~X86_SEL_RPL, 0);
5727}
5728
5729
5730/** \#GP(0) - 0d. */
5731DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5732{
5733 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5734}
5735
5736
5737/** \#GP(sel) - 0d. */
5738DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5739{
5740 NOREF(iSegReg); NOREF(fAccess);
5741 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5742 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5743}
5744
5745#ifdef IEM_WITH_SETJMP
5746/** \#GP(sel) - 0d, longjmp. */
5747DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5748{
5749 NOREF(iSegReg); NOREF(fAccess);
5750 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5751 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5752}
5753#endif
5754
5755/** \#GP(sel) - 0d. */
5756DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5757{
5758 NOREF(Sel);
5759 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5760}
5761
5762#ifdef IEM_WITH_SETJMP
5763/** \#GP(sel) - 0d, longjmp. */
5764DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5765{
5766 NOREF(Sel);
5767 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5768}
5769#endif
5770
5771
5772/** \#GP(sel) - 0d. */
5773DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5774{
5775 NOREF(iSegReg); NOREF(fAccess);
5776 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5777}
5778
5779#ifdef IEM_WITH_SETJMP
5780/** \#GP(sel) - 0d, longjmp. */
5781DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5782 uint32_t fAccess)
5783{
5784 NOREF(iSegReg); NOREF(fAccess);
5785 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5786}
5787#endif
5788
5789
5790/** \#PF(n) - 0e. */
5791DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5792{
5793 uint16_t uErr;
5794 switch (rc)
5795 {
5796 case VERR_PAGE_NOT_PRESENT:
5797 case VERR_PAGE_TABLE_NOT_PRESENT:
5798 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5799 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5800 uErr = 0;
5801 break;
5802
5803 default:
5804 AssertMsgFailed(("%Rrc\n", rc));
5805 RT_FALL_THRU();
5806 case VERR_ACCESS_DENIED:
5807 uErr = X86_TRAP_PF_P;
5808 break;
5809
5810 /** @todo reserved */
5811 }
5812
5813 if (pVCpu->iem.s.uCpl == 3)
5814 uErr |= X86_TRAP_PF_US;
5815
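    /* Set the instruction-fetch bit (I/D) for code accesses when no-execute is
       enabled, which requires both CR4.PAE and EFER.NXE on this code path. */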
5816 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5817 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5818 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5819 uErr |= X86_TRAP_PF_ID;
5820
5821 #if 0 /* This is so much nonsense, really. Why was it done like that? */
5822     /* Note! RW access callers reporting a WRITE protection fault will clear
5823 the READ flag before calling. So, read-modify-write accesses (RW)
5824 can safely be reported as READ faults. */
5825 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5826 uErr |= X86_TRAP_PF_RW;
5827#else
5828 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5829 {
5830 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5831 uErr |= X86_TRAP_PF_RW;
5832 }
5833#endif
5834
5835 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5836 uErr, GCPtrWhere);
5837}
5838
5839#ifdef IEM_WITH_SETJMP
5840/** \#PF(n) - 0e, longjmp. */
5841IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5842{
5843 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5844}
5845#endif
5846
5847
5848/** \#MF(0) - 10. */
5849DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5850{
5851 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5852}
5853
5854
5855/** \#AC(0) - 11. */
5856DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5857{
5858 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5859}
5860
5861
5862/**
5863 * Macro for calling iemCImplRaiseDivideError().
5864 *
5865 * This enables us to add/remove arguments and force different levels of
5866 * inlining as we wish.
5867 *
5868 * @return Strict VBox status code.
5869 */
5870#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5871IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5872{
5873 NOREF(cbInstr);
5874 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5875}
5876
5877
5878/**
5879 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5880 *
5881 * This enables us to add/remove arguments and force different levels of
5882 * inlining as we wish.
5883 *
5884 * @return Strict VBox status code.
5885 */
5886#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5887IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5888{
5889 NOREF(cbInstr);
5890 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5891}
5892
5893
5894/**
5895 * Macro for calling iemCImplRaiseInvalidOpcode().
5896 *
5897 * This enables us to add/remove arguments and force different levels of
5898 * inlining as we wish.
5899 *
5900 * @return Strict VBox status code.
5901 */
5902#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5903IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5904{
5905 NOREF(cbInstr);
5906 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5907}
5908
5909
5910/** @} */
5911
5912
5913/*
5914 *
5915  * Helper routines.
5916  * Helper routines.
5917  * Helper routines.
5918 *
5919 */
5920
5921/**
5922 * Recalculates the effective operand size.
5923 *
5924 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5925 */
5926IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5927{
5928 switch (pVCpu->iem.s.enmCpuMode)
5929 {
5930 case IEMMODE_16BIT:
5931 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5932 break;
5933 case IEMMODE_32BIT:
5934 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5935 break;
5936 case IEMMODE_64BIT:
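            /* In 64-bit mode REX.W forces a 64-bit operand size and takes precedence
               over the 0x66 operand-size prefix. */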
5937 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5938 {
5939 case 0:
5940 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5941 break;
5942 case IEM_OP_PRF_SIZE_OP:
5943 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5944 break;
5945 case IEM_OP_PRF_SIZE_REX_W:
5946 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5947 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5948 break;
5949 }
5950 break;
5951 default:
5952 AssertFailed();
5953 }
5954}
5955
5956
5957/**
5958 * Sets the default operand size to 64-bit and recalculates the effective
5959 * operand size.
5960 *
5961 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5962 */
5963IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5964{
5965 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5966 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
5967 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5968 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5969 else
5970 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5971}
5972
5973
5974/*
5975 *
5976 * Common opcode decoders.
5977 * Common opcode decoders.
5978 * Common opcode decoders.
5979 *
5980 */
5981//#include <iprt/mem.h>
5982
5983/**
5984 * Used to add extra details about a stub case.
5985 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5986 */
5987IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
5988{
5989#if defined(LOG_ENABLED) && defined(IN_RING3)
5990 PVM pVM = pVCpu->CTX_SUFF(pVM);
5991 char szRegs[4096];
5992 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5993 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5994 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5995 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5996 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5997 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5998 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5999 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6000 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6001 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6002 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6003 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6004 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6005 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6006 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6007 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6008 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6009 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6010 " efer=%016VR{efer}\n"
6011 " pat=%016VR{pat}\n"
6012 " sf_mask=%016VR{sf_mask}\n"
6013 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6014 " lstar=%016VR{lstar}\n"
6015 " star=%016VR{star} cstar=%016VR{cstar}\n"
6016 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6017 );
6018
6019 char szInstr[256];
6020 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6021 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6022 szInstr, sizeof(szInstr), NULL);
6023
6024 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6025#else
6026 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs, IEM_GET_CTX(pVCpu)->rip);
6027#endif
6028}
6029
6030/**
6031 * Complains about a stub.
6032 *
6033  * Two versions of this macro are provided: one for daily use and one for use when
6034  * working on IEM.
6035 */
6036#if 0
6037# define IEMOP_BITCH_ABOUT_STUB() \
6038 do { \
6039 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6040 iemOpStubMsg2(pVCpu); \
6041 RTAssertPanic(); \
6042 } while (0)
6043#else
6044# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6045#endif
6046
6047/** Stubs an opcode. */
6048#define FNIEMOP_STUB(a_Name) \
6049 FNIEMOP_DEF(a_Name) \
6050 { \
6051 RT_NOREF_PV(pVCpu); \
6052 IEMOP_BITCH_ABOUT_STUB(); \
6053 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6054 } \
6055 typedef int ignore_semicolon
6056
6057/** Stubs an opcode. */
6058#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6059 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6060 { \
6061 RT_NOREF_PV(pVCpu); \
6062 RT_NOREF_PV(a_Name0); \
6063 IEMOP_BITCH_ABOUT_STUB(); \
6064 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6065 } \
6066 typedef int ignore_semicolon
6067
6068/** Stubs an opcode which currently should raise \#UD. */
6069#define FNIEMOP_UD_STUB(a_Name) \
6070 FNIEMOP_DEF(a_Name) \
6071 { \
6072 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6073 return IEMOP_RAISE_INVALID_OPCODE(); \
6074 } \
6075 typedef int ignore_semicolon
6076
6077/** Stubs an opcode which currently should raise \#UD. */
6078#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6079 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6080 { \
6081 RT_NOREF_PV(pVCpu); \
6082 RT_NOREF_PV(a_Name0); \
6083 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6084 return IEMOP_RAISE_INVALID_OPCODE(); \
6085 } \
6086 typedef int ignore_semicolon
6087
6088
6089
6090/** @name Register Access.
6091 * @{
6092 */
6093
6094/**
6095 * Gets a reference (pointer) to the specified hidden segment register.
6096 *
6097 * @returns Hidden register reference.
6098 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6099 * @param iSegReg The segment register.
6100 */
6101IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6102{
6103 Assert(iSegReg < X86_SREG_COUNT);
6104 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6105 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
6106
6107#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6108 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6109 { /* likely */ }
6110 else
6111 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6112#else
6113 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6114#endif
6115 return pSReg;
6116}
6117
6118
6119/**
6120 * Ensures that the given hidden segment register is up to date.
6121 *
6122 * @returns Hidden register reference.
6123 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6124 * @param pSReg The segment register.
6125 */
6126IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6127{
6128#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6129 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6130 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6131#else
6132 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6133 NOREF(pVCpu);
6134#endif
6135 return pSReg;
6136}
6137
6138
6139/**
6140 * Gets a reference (pointer) to the specified segment register (the selector
6141 * value).
6142 *
6143 * @returns Pointer to the selector variable.
6144 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6145 * @param iSegReg The segment register.
6146 */
6147DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6148{
6149 Assert(iSegReg < X86_SREG_COUNT);
6150 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6151 return &pCtx->aSRegs[iSegReg].Sel;
6152}
6153
6154
6155/**
6156 * Fetches the selector value of a segment register.
6157 *
6158 * @returns The selector value.
6159 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6160 * @param iSegReg The segment register.
6161 */
6162DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6163{
6164 Assert(iSegReg < X86_SREG_COUNT);
6165 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
6166}
6167
6168
6169/**
6170 * Fetches the base address value of a segment register.
6171 *
6172  * @returns The segment base address.
6173 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6174 * @param iSegReg The segment register.
6175 */
6176DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6177{
6178 Assert(iSegReg < X86_SREG_COUNT);
6179 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].u64Base;
6180}
6181
6182
6183/**
6184 * Gets a reference (pointer) to the specified general purpose register.
6185 *
6186 * @returns Register reference.
6187 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6188 * @param iReg The general purpose register.
6189 */
6190DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6191{
6192 Assert(iReg < 16);
6193 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6194 return &pCtx->aGRegs[iReg];
6195}
6196
6197
6198/**
6199 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6200 *
6201 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6202 *
6203 * @returns Register reference.
6204 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6205 * @param iReg The register.
6206 */
6207DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6208{
6209 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6210 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6211 {
6212 Assert(iReg < 16);
6213 return &pCtx->aGRegs[iReg].u8;
6214 }
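    /* Without a REX prefix, encodings 4..7 select AH, CH, DH and BH (the high
       byte of the first four GPRs); with any REX prefix present they select
       SPL, BPL, SIL and DIL instead and are handled by the branch above. */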
6215 /* high 8-bit register. */
6216 Assert(iReg < 8);
6217 return &pCtx->aGRegs[iReg & 3].bHi;
6218}
6219
6220
6221/**
6222 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6223 *
6224 * @returns Register reference.
6225 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6226 * @param iReg The register.
6227 */
6228DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6229{
6230 Assert(iReg < 16);
6231 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6232 return &pCtx->aGRegs[iReg].u16;
6233}
6234
6235
6236/**
6237 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6238 *
6239 * @returns Register reference.
6240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6241 * @param iReg The register.
6242 */
6243DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6244{
6245 Assert(iReg < 16);
6246 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6247 return &pCtx->aGRegs[iReg].u32;
6248}
6249
6250
6251/**
6252 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6253 *
6254 * @returns Register reference.
6255 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6256 * @param iReg The register.
6257 */
6258DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6259{
6260    Assert(iReg < 16);
6261 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6262 return &pCtx->aGRegs[iReg].u64;
6263}
6264
6265
6266/**
6267 * Gets a reference (pointer) to the specified segment register's base address.
6268 *
6269 * @returns Segment register base address reference.
6270 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6271 * @param iSegReg The segment selector.
6272 */
6273DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6274{
6275 Assert(iSegReg < X86_SREG_COUNT);
6276 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6277 return &pCtx->aSRegs[iSegReg].u64Base;
6278}
6279
6280
6281/**
6282 * Fetches the value of an 8-bit general purpose register.
6283 *
6284 * @returns The register value.
6285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6286 * @param iReg The register.
6287 */
6288DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6289{
6290 return *iemGRegRefU8(pVCpu, iReg);
6291}
6292
6293
6294/**
6295 * Fetches the value of a 16-bit general purpose register.
6296 *
6297 * @returns The register value.
6298 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6299 * @param iReg The register.
6300 */
6301DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6302{
6303 Assert(iReg < 16);
6304 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
6305}
6306
6307
6308/**
6309 * Fetches the value of a 32-bit general purpose register.
6310 *
6311 * @returns The register value.
6312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6313 * @param iReg The register.
6314 */
6315DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6316{
6317 Assert(iReg < 16);
6318 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
6319}
6320
6321
6322/**
6323 * Fetches the value of a 64-bit general purpose register.
6324 *
6325 * @returns The register value.
6326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6327 * @param iReg The register.
6328 */
6329DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6330{
6331 Assert(iReg < 16);
6332 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
6333}
6334
6335
6336/**
6337 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6338 *
6339 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6340 * segment limit.
6341 *
6342 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6343 * @param offNextInstr The offset of the next instruction.
6344 */
6345IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6346{
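    /* The signed offset is relative to the end of the jump instruction, so the
       instruction length is added in together with the offset below. */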
6347 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6348 switch (pVCpu->iem.s.enmEffOpSize)
6349 {
6350 case IEMMODE_16BIT:
6351 {
6352 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6353 if ( uNewIp > pCtx->cs.u32Limit
6354 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6355 return iemRaiseGeneralProtectionFault0(pVCpu);
6356 pCtx->rip = uNewIp;
6357 break;
6358 }
6359
6360 case IEMMODE_32BIT:
6361 {
6362 Assert(pCtx->rip <= UINT32_MAX);
6363 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6364
6365 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6366 if (uNewEip > pCtx->cs.u32Limit)
6367 return iemRaiseGeneralProtectionFault0(pVCpu);
6368 pCtx->rip = uNewEip;
6369 break;
6370 }
6371
6372 case IEMMODE_64BIT:
6373 {
6374 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6375
6376 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6377 if (!IEM_IS_CANONICAL(uNewRip))
6378 return iemRaiseGeneralProtectionFault0(pVCpu);
6379 pCtx->rip = uNewRip;
6380 break;
6381 }
6382
6383 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6384 }
6385
6386 pCtx->eflags.Bits.u1RF = 0;
6387
6388#ifndef IEM_WITH_CODE_TLB
6389 /* Flush the prefetch buffer. */
6390 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6391#endif
6392
6393 return VINF_SUCCESS;
6394}
6395
6396
6397/**
6398 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6399 *
6400 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6401 * segment limit.
6402 *
6403 * @returns Strict VBox status code.
6404 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6405 * @param offNextInstr The offset of the next instruction.
6406 */
6407IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6408{
6409 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6410 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6411
6412 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6413 if ( uNewIp > pCtx->cs.u32Limit
6414 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6415 return iemRaiseGeneralProtectionFault0(pVCpu);
6416 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6417 pCtx->rip = uNewIp;
6418 pCtx->eflags.Bits.u1RF = 0;
6419
6420#ifndef IEM_WITH_CODE_TLB
6421 /* Flush the prefetch buffer. */
6422 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6423#endif
6424
6425 return VINF_SUCCESS;
6426}
6427
6428
6429/**
6430 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6431 *
6432 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6433 * segment limit.
6434 *
6435 * @returns Strict VBox status code.
6436 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6437 * @param offNextInstr The offset of the next instruction.
6438 */
6439IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6440{
6441 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6442 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6443
6444 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6445 {
6446 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6447
6448 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6449 if (uNewEip > pCtx->cs.u32Limit)
6450 return iemRaiseGeneralProtectionFault0(pVCpu);
6451 pCtx->rip = uNewEip;
6452 }
6453 else
6454 {
6455 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6456
6457 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6458 if (!IEM_IS_CANONICAL(uNewRip))
6459 return iemRaiseGeneralProtectionFault0(pVCpu);
6460 pCtx->rip = uNewRip;
6461 }
6462 pCtx->eflags.Bits.u1RF = 0;
6463
6464#ifndef IEM_WITH_CODE_TLB
6465 /* Flush the prefetch buffer. */
6466 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6467#endif
6468
6469 return VINF_SUCCESS;
6470}
6471
6472
6473/**
6474 * Performs a near jump to the specified address.
6475 *
6476 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6477 * segment limit.
6478 *
6479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6480 * @param uNewRip The new RIP value.
6481 */
6482IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6483{
6484 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6485 switch (pVCpu->iem.s.enmEffOpSize)
6486 {
6487 case IEMMODE_16BIT:
6488 {
6489 Assert(uNewRip <= UINT16_MAX);
6490 if ( uNewRip > pCtx->cs.u32Limit
6491 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6492 return iemRaiseGeneralProtectionFault0(pVCpu);
6493 /** @todo Test 16-bit jump in 64-bit mode. */
6494 pCtx->rip = uNewRip;
6495 break;
6496 }
6497
6498 case IEMMODE_32BIT:
6499 {
6500 Assert(uNewRip <= UINT32_MAX);
6501 Assert(pCtx->rip <= UINT32_MAX);
6502 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6503
6504 if (uNewRip > pCtx->cs.u32Limit)
6505 return iemRaiseGeneralProtectionFault0(pVCpu);
6506 pCtx->rip = uNewRip;
6507 break;
6508 }
6509
6510 case IEMMODE_64BIT:
6511 {
6512 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6513
6514 if (!IEM_IS_CANONICAL(uNewRip))
6515 return iemRaiseGeneralProtectionFault0(pVCpu);
6516 pCtx->rip = uNewRip;
6517 break;
6518 }
6519
6520 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6521 }
6522
6523 pCtx->eflags.Bits.u1RF = 0;
6524
6525#ifndef IEM_WITH_CODE_TLB
6526 /* Flush the prefetch buffer. */
6527 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6528#endif
6529
6530 return VINF_SUCCESS;
6531}
6532
6533
6534/**
6535 * Get the address of the top of the stack.
6536 *
6537 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6538 * @param pCtx The CPU context which SP/ESP/RSP should be
6539 * read.
6540 */
6541DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6542{
6543 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6544 return pCtx->rsp;
6545 if (pCtx->ss.Attr.n.u1DefBig)
6546 return pCtx->esp;
6547 return pCtx->sp;
6548}
6549
6550
6551/**
6552 * Updates the RIP/EIP/IP to point to the next instruction.
6553 *
6554 * This function leaves the EFLAGS.RF flag alone.
6555 *
6556 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6557 * @param cbInstr The number of bytes to add.
6558 */
6559IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6560{
6561 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6562 switch (pVCpu->iem.s.enmCpuMode)
6563 {
6564 case IEMMODE_16BIT:
6565 Assert(pCtx->rip <= UINT16_MAX);
6566 pCtx->eip += cbInstr;
6567 pCtx->eip &= UINT32_C(0xffff);
6568 break;
6569
6570 case IEMMODE_32BIT:
6571 pCtx->eip += cbInstr;
6572 Assert(pCtx->rip <= UINT32_MAX);
6573 break;
6574
6575 case IEMMODE_64BIT:
6576 pCtx->rip += cbInstr;
6577 break;
6578 default: AssertFailed();
6579 }
6580}
6581
6582
6583#if 0
6584/**
6585 * Updates the RIP/EIP/IP to point to the next instruction.
6586 *
6587 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6588 */
6589IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6590{
6591 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6592}
6593#endif
6594
6595
6596
6597/**
6598 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6599 *
6600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6601 * @param cbInstr The number of bytes to add.
6602 */
6603IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6604{
6605 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6606
6607 pCtx->eflags.Bits.u1RF = 0;
6608
6609 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6610#if ARCH_BITS >= 64
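    /* Mode-indexed masks: the 16-bit and 32-bit entries wrap the result within
       32 bits, while 64-bit mode keeps the full RIP. */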
6611 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6612 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6613 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6614#else
6615 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6616 pCtx->rip += cbInstr;
6617 else
6618 pCtx->eip += cbInstr;
6619#endif
6620}
6621
6622
6623/**
6624 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6625 *
6626 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6627 */
6628IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6629{
6630 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6631}
6632
6633
6634/**
6635 * Adds to the stack pointer.
6636 *
6637 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6638 * @param pCtx The CPU context which SP/ESP/RSP should be
6639 * updated.
6640 * @param cbToAdd The number of bytes to add (8-bit!).
6641 */
6642DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6643{
6644 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6645 pCtx->rsp += cbToAdd;
6646 else if (pCtx->ss.Attr.n.u1DefBig)
6647 pCtx->esp += cbToAdd;
6648 else
6649 pCtx->sp += cbToAdd;
6650}
6651
6652
6653/**
6654 * Subtracts from the stack pointer.
6655 *
6656 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6657 * @param pCtx The CPU context which SP/ESP/RSP should be
6658 * updated.
6659 * @param cbToSub The number of bytes to subtract (8-bit!).
6660 */
6661DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6662{
6663 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6664 pCtx->rsp -= cbToSub;
6665 else if (pCtx->ss.Attr.n.u1DefBig)
6666 pCtx->esp -= cbToSub;
6667 else
6668 pCtx->sp -= cbToSub;
6669}
6670
6671
6672/**
6673 * Adds to the temporary stack pointer.
6674 *
6675 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6676 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6677 * @param cbToAdd The number of bytes to add (16-bit).
6678 * @param pCtx Where to get the current stack mode.
6679 */
6680DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6681{
6682 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6683 pTmpRsp->u += cbToAdd;
6684 else if (pCtx->ss.Attr.n.u1DefBig)
6685 pTmpRsp->DWords.dw0 += cbToAdd;
6686 else
6687 pTmpRsp->Words.w0 += cbToAdd;
6688}
6689
6690
6691/**
6692 * Subtracts from the temporary stack pointer.
6693 *
6694 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6695 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6696 * @param cbToSub The number of bytes to subtract.
6697 * @param pCtx Where to get the current stack mode.
6698 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6699 * expecting that.
6700 */
6701DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6702{
6703 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6704 pTmpRsp->u -= cbToSub;
6705 else if (pCtx->ss.Attr.n.u1DefBig)
6706 pTmpRsp->DWords.dw0 -= cbToSub;
6707 else
6708 pTmpRsp->Words.w0 -= cbToSub;
6709}
6710
6711
6712/**
6713 * Calculates the effective stack address for a push of the specified size as
6714 * well as the new RSP value (upper bits may be masked).
6715 *
6716 * @returns Effective stack address for the push.
6717 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6718 * @param pCtx Where to get the current stack mode.
6719 * @param cbItem              The size of the stack item to push.
6720 * @param puNewRsp Where to return the new RSP value.
6721 */
6722DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6723{
6724 RTUINT64U uTmpRsp;
6725 RTGCPTR GCPtrTop;
6726 uTmpRsp.u = pCtx->rsp;
6727
6728 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6729 GCPtrTop = uTmpRsp.u -= cbItem;
6730 else if (pCtx->ss.Attr.n.u1DefBig)
6731 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6732 else
6733 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6734 *puNewRsp = uTmpRsp.u;
6735 return GCPtrTop;
6736}
6737
6738
6739/**
6740 * Gets the current stack pointer and calculates the value after a pop of the
6741 * specified size.
6742 *
6743 * @returns Current stack pointer.
6744 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6745 * @param pCtx Where to get the current stack mode.
6746 * @param cbItem The size of the stack item to pop.
6747 * @param puNewRsp Where to return the new RSP value.
6748 */
6749DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6750{
6751 RTUINT64U uTmpRsp;
6752 RTGCPTR GCPtrTop;
6753 uTmpRsp.u = pCtx->rsp;
6754
6755 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6756 {
6757 GCPtrTop = uTmpRsp.u;
6758 uTmpRsp.u += cbItem;
6759 }
6760 else if (pCtx->ss.Attr.n.u1DefBig)
6761 {
6762 GCPtrTop = uTmpRsp.DWords.dw0;
6763 uTmpRsp.DWords.dw0 += cbItem;
6764 }
6765 else
6766 {
6767 GCPtrTop = uTmpRsp.Words.w0;
6768 uTmpRsp.Words.w0 += cbItem;
6769 }
6770 *puNewRsp = uTmpRsp.u;
6771 return GCPtrTop;
6772}
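
/*
 * Usage note (illustrative sketch, not lifted from a specific caller): these
 * helpers return the effective stack address together with the provisional
 * RSP value, so the caller can commit RSP only after the memory access has
 * succeeded, e.g. something along the lines of:
 *
 *     uint64_t     uNewRsp;
 *     RTGCPTR      GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, sizeof(uint32_t), &uNewRsp);
 *     VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, X86_SREG_SS, GCPtrTop, u32Value);
 *     if (rcStrict == VINF_SUCCESS)
 *         pCtx->rsp = uNewRsp; /* commit only when the store didn't fault */
 */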
6773
6774
6775/**
6776 * Calculates the effective stack address for a push of the specified size as
6777 * well as the new temporary RSP value (upper bits may be masked).
6778 *
6779 * @returns Effective stack address for the push.
6780 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6781 * @param pCtx Where to get the current stack mode.
6782 * @param pTmpRsp The temporary stack pointer. This is updated.
6783 * @param cbItem              The size of the stack item to push.
6784 */
6785DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6786{
6787 RTGCPTR GCPtrTop;
6788
6789 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6790 GCPtrTop = pTmpRsp->u -= cbItem;
6791 else if (pCtx->ss.Attr.n.u1DefBig)
6792 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6793 else
6794 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6795 return GCPtrTop;
6796}
6797
6798
6799/**
6800 * Gets the effective stack address for a pop of the specified size and
6801 * calculates and updates the temporary RSP.
6802 *
6803 * @returns Current stack pointer.
6804 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6805 * @param pCtx Where to get the current stack mode.
6806 * @param pTmpRsp The temporary stack pointer. This is updated.
6807 * @param cbItem The size of the stack item to pop.
6808 */
6809DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6810{
6811 RTGCPTR GCPtrTop;
6812 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6813 {
6814 GCPtrTop = pTmpRsp->u;
6815 pTmpRsp->u += cbItem;
6816 }
6817 else if (pCtx->ss.Attr.n.u1DefBig)
6818 {
6819 GCPtrTop = pTmpRsp->DWords.dw0;
6820 pTmpRsp->DWords.dw0 += cbItem;
6821 }
6822 else
6823 {
6824 GCPtrTop = pTmpRsp->Words.w0;
6825 pTmpRsp->Words.w0 += cbItem;
6826 }
6827 return GCPtrTop;
6828}
6829
6830/** @} */
6831
6832
6833/** @name FPU access and helpers.
6834 *
6835 * @{
6836 */
6837
6838
6839/**
6840 * Hook for preparing to use the host FPU.
6841 *
6842 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6843 *
6844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6845 */
6846DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6847{
6848#ifdef IN_RING3
6849 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6850#else
6851 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6852#endif
6853}
6854
6855
6856/**
6857 * Hook for preparing to use the host FPU for SSE.
6858 *
6859 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6860 *
6861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6862 */
6863DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6864{
6865 iemFpuPrepareUsage(pVCpu);
6866}
6867
6868
6869/**
6870 * Hook for preparing to use the host FPU for AVX.
6871 *
6872 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6873 *
6874 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6875 */
6876DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6877{
6878 iemFpuPrepareUsage(pVCpu);
6879}
6880
6881
6882/**
6883 * Hook for actualizing the guest FPU state before the interpreter reads it.
6884 *
6885 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6886 *
6887 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6888 */
6889DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6890{
6891#ifdef IN_RING3
6892 NOREF(pVCpu);
6893#else
6894 CPUMRZFpuStateActualizeForRead(pVCpu);
6895#endif
6896}
6897
6898
6899/**
6900 * Hook for actualizing the guest FPU state before the interpreter changes it.
6901 *
6902 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6903 *
6904 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6905 */
6906DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6907{
6908#ifdef IN_RING3
6909 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6910#else
6911 CPUMRZFpuStateActualizeForChange(pVCpu);
6912#endif
6913}
6914
6915
6916/**
6917 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6918 * only.
6919 *
6920 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6921 *
6922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6923 */
6924DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6925{
6926#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6927 NOREF(pVCpu);
6928#else
6929 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6930#endif
6931}
6932
6933
6934/**
6935 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6936 * read+write.
6937 *
6938 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6939 *
6940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6941 */
6942DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6943{
6944#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6945 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6946#else
6947 CPUMRZFpuStateActualizeForChange(pVCpu);
6948#endif
6949}
6950
6951
6952/**
6953 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
6954 * only.
6955 *
6956 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6957 *
6958 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6959 */
6960DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
6961{
6962#ifdef IN_RING3
6963 NOREF(pVCpu);
6964#else
6965 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
6966#endif
6967}
6968
6969
6970/**
6971 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
6972 * read+write.
6973 *
6974 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6975 *
6976 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6977 */
6978DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
6979{
6980#ifdef IN_RING3
6981 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6982#else
6983 CPUMRZFpuStateActualizeForChange(pVCpu);
6984#endif
6985}
6986
6987
6988/**
6989 * Stores a QNaN value into a FPU register.
6990 *
6991 * @param pReg Pointer to the register.
6992 */
6993DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6994{
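    /* This is the x87 "real indefinite" QNaN: sign = 1, exponent = 0x7fff,
       mantissa = 0xC000000000000000 -- the value the FPU itself produces for
       masked invalid-operation responses. */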
6995 pReg->au32[0] = UINT32_C(0x00000000);
6996 pReg->au32[1] = UINT32_C(0xc0000000);
6997 pReg->au16[4] = UINT16_C(0xffff);
6998}
6999
7000
7001/**
7002 * Updates the FOP, FPU.CS and FPUIP registers.
7003 *
7004 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7005 * @param pCtx The CPU context.
7006 * @param pFpuCtx The FPU context.
7007 */
7008DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
7009{
7010 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7011 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7012    /** @todo x87.CS and FPUIP need to be kept separately. */
7013 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7014 {
7015 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7016 * happens in real mode here based on the fnsave and fnstenv images. */
7017 pFpuCtx->CS = 0;
7018 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
7019 }
7020 else
7021 {
7022 pFpuCtx->CS = pCtx->cs.Sel;
7023 pFpuCtx->FPUIP = pCtx->rip;
7024 }
7025}
7026
7027
7028/**
7029 * Updates the x87.DS and FPUDP registers.
7030 *
7031 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7032 * @param pCtx The CPU context.
7033 * @param pFpuCtx The FPU context.
7034 * @param iEffSeg The effective segment register.
7035 * @param GCPtrEff The effective address relative to @a iEffSeg.
7036 */
7037DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7038{
7039 RTSEL sel;
7040 switch (iEffSeg)
7041 {
7042 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
7043 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
7044 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
7045 case X86_SREG_ES: sel = pCtx->es.Sel; break;
7046 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
7047 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
7048 default:
7049 AssertMsgFailed(("%d\n", iEffSeg));
7050 sel = pCtx->ds.Sel;
7051 }
7052    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7053 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7054 {
7055 pFpuCtx->DS = 0;
7056 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7057 }
7058 else
7059 {
7060 pFpuCtx->DS = sel;
7061 pFpuCtx->FPUDP = GCPtrEff;
7062 }
7063}
7064
7065
7066/**
7067 * Rotates the stack registers in the push direction.
7068 *
7069 * @param pFpuCtx The FPU context.
7070 * @remarks This is a complete waste of time, but fxsave stores the registers in
7071 * stack order.
7072 */
7073DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7074{
7075 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7076 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7077 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7078 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7079 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7080 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7081 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7082 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7083 pFpuCtx->aRegs[0].r80 = r80Tmp;
7084}
7085
7086
7087/**
7088 * Rotates the stack registers in the pop direction.
7089 *
7090 * @param pFpuCtx The FPU context.
7091 * @remarks This is a complete waste of time, but fxsave stores the registers in
7092 * stack order.
7093 */
7094DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7095{
7096 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7097 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7098 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7099 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7100 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7101 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7102 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7103 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7104 pFpuCtx->aRegs[7].r80 = r80Tmp;
7105}
7106
7107
7108/**
7109 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7110 * exception prevents it.
7111 *
7112 * @param pResult The FPU operation result to push.
7113 * @param pFpuCtx The FPU context.
7114 */
7115IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7116{
7117 /* Update FSW and bail if there are pending exceptions afterwards. */
7118 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7119 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
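    /* Note: X86_FSW_IE/DE/ZE sit in the same bit positions as X86_FCW_IM/DM/ZM,
       which is what allows the bitwise masking in the check below. */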
7120 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7121 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7122 {
7123 pFpuCtx->FSW = fFsw;
7124 return;
7125 }
7126
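    /* Adding 7 is the same as subtracting 1 modulo 8: a push decrements TOP. */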
7127 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7128 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7129 {
7130 /* All is fine, push the actual value. */
7131 pFpuCtx->FTW |= RT_BIT(iNewTop);
7132 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7133 }
7134 else if (pFpuCtx->FCW & X86_FCW_IM)
7135 {
7136 /* Masked stack overflow, push QNaN. */
7137 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7138 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7139 }
7140 else
7141 {
7142 /* Raise stack overflow, don't push anything. */
7143 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7144 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7145 return;
7146 }
7147
7148 fFsw &= ~X86_FSW_TOP_MASK;
7149 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7150 pFpuCtx->FSW = fFsw;
7151
7152 iemFpuRotateStackPush(pFpuCtx);
7153}
7154
7155
7156/**
7157 * Stores a result in a FPU register and updates the FSW and FTW.
7158 *
7159 * @param pFpuCtx The FPU context.
7160 * @param pResult The result to store.
7161 * @param iStReg Which FPU register to store it in.
7162 */
7163IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7164{
7165 Assert(iStReg < 8);
7166 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7167 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7168 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7169 pFpuCtx->FTW |= RT_BIT(iReg);
7170 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7171}
7172
7173
7174/**
7175 * Only updates the FPU status word (FSW) with the result of the current
7176 * instruction.
7177 *
7178 * @param pFpuCtx The FPU context.
7179 * @param u16FSW The FSW output of the current instruction.
7180 */
7181IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7182{
7183 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7184 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7185}
7186
7187
7188/**
7189 * Pops one item off the FPU stack if no pending exception prevents it.
7190 *
7191 * @param pFpuCtx The FPU context.
7192 */
7193IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7194{
7195 /* Check pending exceptions. */
7196 uint16_t uFSW = pFpuCtx->FSW;
7197 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7198 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7199 return;
7200
7201    /* TOP++ (adding 9 below is +1 modulo 8 within the TOP field). */
7202 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7203 uFSW &= ~X86_FSW_TOP_MASK;
7204 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7205 pFpuCtx->FSW = uFSW;
7206
7207 /* Mark the previous ST0 as empty. */
7208 iOldTop >>= X86_FSW_TOP_SHIFT;
7209 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7210
7211 /* Rotate the registers. */
7212 iemFpuRotateStackPop(pFpuCtx);
7213}
7214
7215
7216/**
7217 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7218 *
7219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7220 * @param pResult The FPU operation result to push.
7221 */
7222IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7223{
7224 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7225 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7226 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7227 iemFpuMaybePushResult(pResult, pFpuCtx);
7228}
7229
7230
7231/**
7232 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7233 * and sets FPUDP and FPUDS.
7234 *
7235 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7236 * @param pResult The FPU operation result to push.
7237 * @param iEffSeg The effective segment register.
7238 * @param GCPtrEff The effective address relative to @a iEffSeg.
7239 */
7240IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7241{
7242 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7243 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7244 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7245 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7246 iemFpuMaybePushResult(pResult, pFpuCtx);
7247}
7248
7249
7250/**
7251 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7252 * unless a pending exception prevents it.
7253 *
7254 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7255 * @param pResult The FPU operation result to store and push.
7256 */
7257IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7258{
7259 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7260 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7261 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7262
7263 /* Update FSW and bail if there are pending exceptions afterwards. */
7264 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7265 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7266 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7267 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7268 {
7269 pFpuCtx->FSW = fFsw;
7270 return;
7271 }
7272
7273 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7274 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7275 {
7276 /* All is fine, push the actual value. */
7277 pFpuCtx->FTW |= RT_BIT(iNewTop);
7278 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7279 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7280 }
7281 else if (pFpuCtx->FCW & X86_FCW_IM)
7282 {
7283 /* Masked stack overflow, push QNaN. */
7284 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7285 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7286 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7287 }
7288 else
7289 {
7290 /* Raise stack overflow, don't push anything. */
7291 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7292 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7293 return;
7294 }
7295
7296 fFsw &= ~X86_FSW_TOP_MASK;
7297 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7298 pFpuCtx->FSW = fFsw;
7299
7300 iemFpuRotateStackPush(pFpuCtx);
7301}
7302
7303
7304/**
7305 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7306 * FOP.
7307 *
7308 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7309 * @param pResult The result to store.
7310 * @param iStReg Which FPU register to store it in.
7311 */
7312IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7313{
7314 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7315 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7316 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7317 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7318}
7319
7320
7321/**
7322 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7323 * FOP, and then pops the stack.
7324 *
7325 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7326 * @param pResult The result to store.
7327 * @param iStReg Which FPU register to store it in.
7328 */
7329IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7330{
7331 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7332 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7333 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7334 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7335 iemFpuMaybePopOne(pFpuCtx);
7336}
7337
7338
7339/**
7340 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7341 * FPUDP, and FPUDS.
7342 *
7343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7344 * @param pResult The result to store.
7345 * @param iStReg Which FPU register to store it in.
7346 * @param iEffSeg The effective memory operand selector register.
7347 * @param GCPtrEff The effective memory operand offset.
7348 */
7349IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7350 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7351{
7352 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7353 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7354 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7355 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7356 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7357}
7358
7359
7360/**
7361 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7362 * FPUDP, and FPUDS, and then pops the stack.
7363 *
7364 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7365 * @param pResult The result to store.
7366 * @param iStReg Which FPU register to store it in.
7367 * @param iEffSeg The effective memory operand selector register.
7368 * @param GCPtrEff The effective memory operand offset.
7369 */
7370IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7371 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7372{
7373 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7374 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7375 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7376 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7377 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7378 iemFpuMaybePopOne(pFpuCtx);
7379}
7380
7381
7382/**
7383 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7384 *
7385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7386 */
7387IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7388{
7389 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7390 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7391 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7392}
7393
7394
7395/**
7396 * Marks the specified stack register as free (for FFREE).
7397 *
7398 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7399 * @param iStReg The register to free.
7400 */
7401IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7402{
7403 Assert(iStReg < 8);
7404 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7405 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7406 pFpuCtx->FTW &= ~RT_BIT(iReg);
7407}
7408
7409
7410/**
7411 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7412 *
7413 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7414 */
7415IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7416{
7417 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7418 uint16_t uFsw = pFpuCtx->FSW;
7419 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7420 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7421 uFsw &= ~X86_FSW_TOP_MASK;
7422 uFsw |= uTop;
7423 pFpuCtx->FSW = uFsw;
7424}
7425
7426
7427/**
7428 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7429 *
7430 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7431 */
7432IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7433{
7434 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7435 uint16_t uFsw = pFpuCtx->FSW;
7436 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7437 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7438 uFsw &= ~X86_FSW_TOP_MASK;
7439 uFsw |= uTop;
7440 pFpuCtx->FSW = uFsw;
7441}
7442
7443
7444/**
7445 * Updates the FSW, FOP, FPUIP, and FPUCS.
7446 *
7447 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7448 * @param u16FSW The FSW from the current instruction.
7449 */
7450IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7451{
7452 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7453 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7454 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7455 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7456}
7457
7458
7459/**
7460 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7461 *
7462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7463 * @param u16FSW The FSW from the current instruction.
7464 */
7465IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7466{
7467 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7468 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7469 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7470 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7471 iemFpuMaybePopOne(pFpuCtx);
7472}
7473
7474
7475/**
7476 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7477 *
7478 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7479 * @param u16FSW The FSW from the current instruction.
7480 * @param iEffSeg The effective memory operand selector register.
7481 * @param GCPtrEff The effective memory operand offset.
7482 */
7483IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7484{
7485 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7486 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7487 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7488 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7489 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7490}
7491
7492
7493/**
7494 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7495 *
7496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7497 * @param u16FSW The FSW from the current instruction.
7498 */
7499IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7500{
7501 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7502 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7503 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7504 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7505 iemFpuMaybePopOne(pFpuCtx);
7506 iemFpuMaybePopOne(pFpuCtx);
7507}
7508
7509
7510/**
7511 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7512 *
7513 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7514 * @param u16FSW The FSW from the current instruction.
7515 * @param iEffSeg The effective memory operand selector register.
7516 * @param GCPtrEff The effective memory operand offset.
7517 */
7518IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7519{
7520 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7521 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7522 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7523 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7524 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7525 iemFpuMaybePopOne(pFpuCtx);
7526}
7527
7528
7529/**
7530 * Worker routine for raising an FPU stack underflow exception.
7531 *
7532 * @param pFpuCtx The FPU context.
7533 * @param iStReg The stack register being accessed.
7534 */
7535IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7536{
7537 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7538 if (pFpuCtx->FCW & X86_FCW_IM)
7539 {
7540 /* Masked underflow. */
7541 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7542 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7543 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7544 if (iStReg != UINT8_MAX)
7545 {
7546 pFpuCtx->FTW |= RT_BIT(iReg);
7547 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7548 }
7549 }
7550 else
7551 {
7552 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7553 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7554 }
7555}
7556
7557
7558/**
7559 * Raises a FPU stack underflow exception.
7560 *
7561 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7562 * @param iStReg The destination register that should be loaded
7563 * with QNaN if \#IS is not masked. Specify
7564 * UINT8_MAX if none (like for fcom).
7565 */
7566DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7567{
7568 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7569 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7570 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7571 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7572}
7573
7574
7575DECL_NO_INLINE(IEM_STATIC, void)
7576iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7577{
7578 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7579 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7580 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7581 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7582 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7583}
7584
7585
7586DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7587{
7588 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7589 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7590 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7591 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7592 iemFpuMaybePopOne(pFpuCtx);
7593}
7594
7595
7596DECL_NO_INLINE(IEM_STATIC, void)
7597iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7598{
7599 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7600 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7601 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7602 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7603 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7604 iemFpuMaybePopOne(pFpuCtx);
7605}
7606
7607
7608DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7609{
7610 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7611 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7612 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7613 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7614 iemFpuMaybePopOne(pFpuCtx);
7615 iemFpuMaybePopOne(pFpuCtx);
7616}
7617
7618
7619DECL_NO_INLINE(IEM_STATIC, void)
7620iemFpuStackPushUnderflow(PVMCPU pVCpu)
7621{
7622 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7623 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7624 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7625
7626 if (pFpuCtx->FCW & X86_FCW_IM)
7627 {
7628        /* Masked stack underflow - push QNaN. */
7629 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7630 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7631 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7632 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7633 pFpuCtx->FTW |= RT_BIT(iNewTop);
7634 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7635 iemFpuRotateStackPush(pFpuCtx);
7636 }
7637 else
7638 {
7639 /* Exception pending - don't change TOP or the register stack. */
7640 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7641 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7642 }
7643}
7644
7645
7646DECL_NO_INLINE(IEM_STATIC, void)
7647iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7648{
7649 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7650 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7651 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7652
7653 if (pFpuCtx->FCW & X86_FCW_IM)
7654 {
7655        /* Masked stack underflow - push QNaN. */
7656 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7657 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7658 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7659 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7660 pFpuCtx->FTW |= RT_BIT(iNewTop);
7661 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7662 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7663 iemFpuRotateStackPush(pFpuCtx);
7664 }
7665 else
7666 {
7667 /* Exception pending - don't change TOP or the register stack. */
7668 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7669 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7670 }
7671}
7672
7673
7674/**
7675 * Worker routine for raising an FPU stack overflow exception on a push.
7676 *
7677 * @param pFpuCtx The FPU context.
7678 */
7679IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7680{
7681 if (pFpuCtx->FCW & X86_FCW_IM)
7682 {
7683 /* Masked overflow. */
7684 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7685 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7686 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7687 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7688 pFpuCtx->FTW |= RT_BIT(iNewTop);
7689 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7690 iemFpuRotateStackPush(pFpuCtx);
7691 }
7692 else
7693 {
7694 /* Exception pending - don't change TOP or the register stack. */
7695 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7696 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7697 }
7698}
7699
7700
7701/**
7702 * Raises a FPU stack overflow exception on a push.
7703 *
7704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7705 */
7706DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7707{
7708 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7709 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7710 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7711 iemFpuStackPushOverflowOnly(pFpuCtx);
7712}
7713
7714
7715/**
7716 * Raises a FPU stack overflow exception on a push with a memory operand.
7717 *
7718 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7719 * @param iEffSeg The effective memory operand selector register.
7720 * @param GCPtrEff The effective memory operand offset.
7721 */
7722DECL_NO_INLINE(IEM_STATIC, void)
7723iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7724{
7725 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7726 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7727 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7728 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7729 iemFpuStackPushOverflowOnly(pFpuCtx);
7730}
7731
7732
7733IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7734{
7735 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7736 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7737 if (pFpuCtx->FTW & RT_BIT(iReg))
7738 return VINF_SUCCESS;
7739 return VERR_NOT_FOUND;
7740}
7741
7742
7743IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7744{
7745 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7746 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7747 if (pFpuCtx->FTW & RT_BIT(iReg))
7748 {
7749 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7750 return VINF_SUCCESS;
7751 }
7752 return VERR_NOT_FOUND;
7753}
7754
7755
7756IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7757 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7758{
7759 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7760 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7761 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7762 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7763 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7764 {
7765 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7766 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7767 return VINF_SUCCESS;
7768 }
7769 return VERR_NOT_FOUND;
7770}
7771
7772
7773IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7774{
7775 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7776 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7777 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7778 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7779 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7780 {
7781 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7782 return VINF_SUCCESS;
7783 }
7784 return VERR_NOT_FOUND;
7785}
7786
7787
7788/**
7789 * Updates the FPU exception status after FCW is changed.
7790 *
7791 * @param pFpuCtx The FPU context.
7792 */
7793IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7794{
7795 uint16_t u16Fsw = pFpuCtx->FSW;
7796 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7797 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7798 else
7799 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7800 pFpuCtx->FSW = u16Fsw;
7801}
7802
7803
7804/**
7805 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7806 *
7807 * @returns The full FTW.
7808 * @param pFpuCtx The FPU context.
7809 */
7810IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7811{
7812 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7813 uint16_t u16Ftw = 0;
7814 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
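    /* Full tag word encoding: 00 = valid, 01 = zero, 10 = special (NaN,
       infinity, denormal, unnormal), 11 = empty. */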
7815 for (unsigned iSt = 0; iSt < 8; iSt++)
7816 {
7817 unsigned const iReg = (iSt + iTop) & 7;
7818 if (!(u8Ftw & RT_BIT(iReg)))
7819 u16Ftw |= 3 << (iReg * 2); /* empty */
7820 else
7821 {
7822 uint16_t uTag;
7823 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7824 if (pr80Reg->s.uExponent == 0x7fff)
7825 uTag = 2; /* Exponent is all 1's => Special. */
7826 else if (pr80Reg->s.uExponent == 0x0000)
7827 {
7828 if (pr80Reg->s.u64Mantissa == 0x0000)
7829 uTag = 1; /* All bits are zero => Zero. */
7830 else
7831 uTag = 2; /* Must be special. */
7832 }
7833 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7834 uTag = 0; /* Valid. */
7835 else
7836 uTag = 2; /* Must be special. */
7837
7838            u16Ftw |= uTag << (iReg * 2);
7839 }
7840 }
7841
7842 return u16Ftw;
7843}
7844
7845
7846/**
7847 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7848 *
7849 * @returns The compressed FTW.
7850 * @param u16FullFtw The full FTW to convert.
7851 */
7852IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7853{
7854 uint8_t u8Ftw = 0;
7855 for (unsigned i = 0; i < 8; i++)
7856 {
7857 if ((u16FullFtw & 3) != 3 /*empty*/)
7858 u8Ftw |= RT_BIT(i);
7859 u16FullFtw >>= 2;
7860 }
7861
7862 return u8Ftw;
7863}
7864
7865/** @} */
7866
7867
7868/** @name Memory access.
7869 *
7870 * @{
7871 */
7872
7873
7874/**
7875 * Updates the IEMCPU::cbWritten counter if applicable.
7876 *
7877 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7878 * @param fAccess The access being accounted for.
7879 * @param cbMem The access size.
7880 */
7881DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7882{
7883 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7884 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7885 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7886}
7887
7888
7889/**
7890 * Checks if the given segment can be written to, raising the appropriate
7891 * exception if not.
7892 *
7893 * @returns VBox strict status code.
7894 *
7895 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7896 * @param pHid Pointer to the hidden register.
7897 * @param iSegReg The register number.
7898 * @param pu64BaseAddr Where to return the base address to use for the
7899 * segment. (In 64-bit code it may differ from the
7900 * base in the hidden segment.)
7901 */
7902IEM_STATIC VBOXSTRICTRC
7903iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7904{
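    /* In 64-bit mode segmentation is essentially disabled for data accesses;
       only FS and GS contribute a non-zero base. */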
7905 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7906 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7907 else
7908 {
7909 if (!pHid->Attr.n.u1Present)
7910 {
7911 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7912 AssertRelease(uSel == 0);
7913 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7914 return iemRaiseGeneralProtectionFault0(pVCpu);
7915 }
7916
7917 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7918 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7919 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7920 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7921 *pu64BaseAddr = pHid->u64Base;
7922 }
7923 return VINF_SUCCESS;
7924}
7925
7926
7927/**
7928 * Checks if the given segment can be read from, raising the appropriate
7929 * exception if not.
7930 *
7931 * @returns VBox strict status code.
7932 *
7933 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7934 * @param pHid Pointer to the hidden register.
7935 * @param iSegReg The register number.
7936 * @param pu64BaseAddr Where to return the base address to use for the
7937 * segment. (In 64-bit code it may differ from the
7938 * base in the hidden segment.)
7939 */
7940IEM_STATIC VBOXSTRICTRC
7941iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7942{
7943 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7944 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7945 else
7946 {
7947 if (!pHid->Attr.n.u1Present)
7948 {
7949 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7950 AssertRelease(uSel == 0);
7951 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7952 return iemRaiseGeneralProtectionFault0(pVCpu);
7953 }
7954
7955 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7956 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7957 *pu64BaseAddr = pHid->u64Base;
7958 }
7959 return VINF_SUCCESS;
7960}
7961
7962
7963/**
7964 * Applies the segment limit, base and attributes.
7965 *
7966 * This may raise a \#GP or \#SS.
7967 *
7968 * @returns VBox strict status code.
7969 *
7970 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7971 * @param fAccess The kind of access which is being performed.
7972 * @param iSegReg The index of the segment register to apply.
7973 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7974 * TSS, ++).
7975 * @param cbMem The access size.
7976 * @param pGCPtrMem Pointer to the guest memory address to apply
7977 * segmentation to. Input and output parameter.
7978 */
7979IEM_STATIC VBOXSTRICTRC
7980iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7981{
7982 if (iSegReg == UINT8_MAX)
7983 return VINF_SUCCESS;
7984
7985 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7986 switch (pVCpu->iem.s.enmCpuMode)
7987 {
7988 case IEMMODE_16BIT:
7989 case IEMMODE_32BIT:
7990 {
7991 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7992 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7993
7994 if ( pSel->Attr.n.u1Present
7995 && !pSel->Attr.n.u1Unusable)
7996 {
7997 Assert(pSel->Attr.n.u1DescType);
7998 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7999 {
8000 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8001 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8002 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8003
8004 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8005 {
8006 /** @todo CPL check. */
8007 }
8008
8009 /*
8010 * There are two kinds of data selectors, normal and expand down.
8011 */
8012 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8013 {
8014 if ( GCPtrFirst32 > pSel->u32Limit
8015 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8016 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8017 }
8018 else
8019 {
8020 /*
8021 * The upper boundary is defined by the B bit, not the G bit!
8022 */
8023 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8024 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8025 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8026 }
8027 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8028 }
8029 else
8030 {
8031
8032 /*
8033 * Code selectors can usually be read through; writing is
8034 * only permitted in real and V8086 mode.
8035 */
8036 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8037 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8038 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8039 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8040 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8041
8042 if ( GCPtrFirst32 > pSel->u32Limit
8043 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8044 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8045
8046 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8047 {
8048 /** @todo CPL check. */
8049 }
8050
8051 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8052 }
8053 }
8054 else
8055 return iemRaiseGeneralProtectionFault0(pVCpu);
8056 return VINF_SUCCESS;
8057 }
8058
8059 case IEMMODE_64BIT:
8060 {
8061 RTGCPTR GCPtrMem = *pGCPtrMem;
8062 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8063 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8064
8065 Assert(cbMem >= 1);
8066 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8067 return VINF_SUCCESS;
8068 return iemRaiseGeneralProtectionFault0(pVCpu);
8069 }
8070
8071 default:
8072 AssertFailedReturn(VERR_IEM_IPE_7);
8073 }
8074}
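/*
 * Illustrative sketch (not part of the original source; X86_SREG_DS is the
 * usual DS register index constant): in 16-bit or 32-bit mode with a normal
 * (expand-up) writable data segment in DS whose base is 0x10000 and whose
 * limit is 0xffff, a 4-byte write at offset 0x1000 passes the checks above
 * and comes back linearized, while the same write at offset 0xfffe crosses
 * the limit and raises a selector bounds fault:
 *
 *     RTGCPTR GCPtrMem = 0x1000;
 *     VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_DATA_W, X86_SREG_DS,
 *                                                4, &GCPtrMem);
 *     // On VINF_SUCCESS, GCPtrMem is now 0x11000 (offset plus segment base).
 */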
8075
8076
8077/**
8078 * Translates a virtual address to a physical address and checks if we
8079 * can access the page as specified.
8080 *
8081 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8082 * @param GCPtrMem The virtual address.
8083 * @param fAccess The intended access.
8084 * @param pGCPhysMem Where to return the physical address.
8085 */
8086IEM_STATIC VBOXSTRICTRC
8087iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8088{
8089 /** @todo Need a different PGM interface here. We're currently using
8090 * generic / REM interfaces. This won't cut it for R0 & RC. */
8091 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8092 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8093 RTGCPHYS GCPhys;
8094 uint64_t fFlags;
8095 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8096 if (RT_FAILURE(rc))
8097 {
8098 /** @todo Check unassigned memory in unpaged mode. */
8099 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8100 *pGCPhysMem = NIL_RTGCPHYS;
8101 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8102 }
8103
8104 /* If the page is writable and does not have the no-exec bit set, all
8105 access is allowed. Otherwise we'll have to check more carefully... */
8106 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8107 {
8108 /* Write to read only memory? */
8109 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8110 && !(fFlags & X86_PTE_RW)
8111 && ( (pVCpu->iem.s.uCpl == 3
8112 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8113 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
8114 {
8115 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8116 *pGCPhysMem = NIL_RTGCPHYS;
8117 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8118 }
8119
8120 /* Kernel memory accessed by userland? */
8121 if ( !(fFlags & X86_PTE_US)
8122 && pVCpu->iem.s.uCpl == 3
8123 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8124 {
8125 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8126 *pGCPhysMem = NIL_RTGCPHYS;
8127 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8128 }
8129
8130 /* Executing non-executable memory? */
8131 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8132 && (fFlags & X86_PTE_PAE_NX)
8133 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
8134 {
8135 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8136 *pGCPhysMem = NIL_RTGCPHYS;
8137 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8138 VERR_ACCESS_DENIED);
8139 }
8140 }
8141
8142 /*
8143 * Set the dirty / access flags.
8144 * ASSUMES this is set when the address is translated rather than on commit...
8145 */
8146 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8147 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8148 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8149 {
8150 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8151 AssertRC(rc2);
8152 }
8153
8154 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8155 *pGCPhysMem = GCPhys;
8156 return VINF_SUCCESS;
8157}
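/*
 * Illustrative sketch (not part of the original source): on success the
 * returned physical address has the page offset of the virtual address
 * merged back in, so it can be handed straight to iemMemPageMap:
 *
 *     RTGCPHYS     GCPhys;
 *     VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem,
 *                                                               IEM_ACCESS_DATA_R, &GCPhys);
 *     if (rcStrict == VINF_SUCCESS)
 *         Assert((GCPhys & PAGE_OFFSET_MASK) == (GCPtrMem & PAGE_OFFSET_MASK));
 */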
8158
8159
8160
8161/**
8162 * Maps a physical page.
8163 *
8164 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8165 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8166 * @param GCPhysMem The physical address.
8167 * @param fAccess The intended access.
8168 * @param ppvMem Where to return the mapping address.
8169 * @param pLock The PGM lock.
8170 */
8171IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8172{
8173#ifdef IEM_VERIFICATION_MODE_FULL
8174 /* Force the alternative path so we can ignore writes. */
8175 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
8176 {
8177 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8178 {
8179 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
8180 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8181 if (RT_FAILURE(rc2))
8182 pVCpu->iem.s.fProblematicMemory = true;
8183 }
8184 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8185 }
8186#endif
8187#ifdef IEM_LOG_MEMORY_WRITES
8188 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8189 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8190#endif
8191#ifdef IEM_VERIFICATION_MODE_MINIMAL
8192 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8193#endif
8194
8195 /** @todo This API may require some improving later. A private deal with PGM
8196 * regarding locking and unlocking needs to be struck. A couple of TLBs
8197 * living in PGM, but with publicly accessible inlined access methods
8198 * could perhaps be an even better solution. */
8199 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8200 GCPhysMem,
8201 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8202 pVCpu->iem.s.fBypassHandlers,
8203 ppvMem,
8204 pLock);
8205 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8206 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8207
8208#ifdef IEM_VERIFICATION_MODE_FULL
8209 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8210 pVCpu->iem.s.fProblematicMemory = true;
8211#endif
8212 return rc;
8213}
8214
8215
8216/**
8217 * Unmap a page previously mapped by iemMemPageMap.
8218 *
8219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8220 * @param GCPhysMem The physical address.
8221 * @param fAccess The intended access.
8222 * @param pvMem What iemMemPageMap returned.
8223 * @param pLock The PGM lock.
8224 */
8225DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8226{
8227 NOREF(pVCpu);
8228 NOREF(GCPhysMem);
8229 NOREF(fAccess);
8230 NOREF(pvMem);
8231 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8232}
8233
8234
8235/**
8236 * Looks up a memory mapping entry.
8237 *
8238 * @returns The mapping index (non-negative) or VERR_NOT_FOUND (negative).
8239 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8240 * @param pvMem The memory address.
8241 * @param fAccess The kind of access to look up.
8242 */
8243DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8244{
8245 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8246 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8247 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8248 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8249 return 0;
8250 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8251 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8252 return 1;
8253 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8254 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8255 return 2;
8256 return VERR_NOT_FOUND;
8257}
8258
8259
8260/**
8261 * Finds a free memmap entry when using iNextMapping doesn't work.
8262 *
8263 * @returns Memory mapping index, 1024 on failure.
8264 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8265 */
8266IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8267{
8268 /*
8269 * The easy case.
8270 */
8271 if (pVCpu->iem.s.cActiveMappings == 0)
8272 {
8273 pVCpu->iem.s.iNextMapping = 1;
8274 return 0;
8275 }
8276
8277 /* There should be enough mappings for all instructions. */
8278 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8279
8280 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8281 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8282 return i;
8283
8284 AssertFailedReturn(1024);
8285}
8286
8287
8288/**
8289 * Commits a bounce buffer that needs writing back and unmaps it.
8290 *
8291 * @returns Strict VBox status code.
8292 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8293 * @param iMemMap The index of the buffer to commit.
8294 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
8295 * Always false in ring-3, obviously.
8296 */
8297IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8298{
8299 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8300 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8301#ifdef IN_RING3
8302 Assert(!fPostponeFail);
8303 RT_NOREF_PV(fPostponeFail);
8304#endif
8305
8306 /*
8307 * Do the writing.
8308 */
8309#ifndef IEM_VERIFICATION_MODE_MINIMAL
8310 PVM pVM = pVCpu->CTX_SUFF(pVM);
8311 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
8312 && !IEM_VERIFICATION_ENABLED(pVCpu))
8313 {
8314 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8315 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8316 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8317 if (!pVCpu->iem.s.fBypassHandlers)
8318 {
8319 /*
8320 * Carefully and efficiently dealing with access handler return
8321 * codes make this a little bloated.
8322 * codes makes this a little bloated.
8323 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8324 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8325 pbBuf,
8326 cbFirst,
8327 PGMACCESSORIGIN_IEM);
8328 if (rcStrict == VINF_SUCCESS)
8329 {
8330 if (cbSecond)
8331 {
8332 rcStrict = PGMPhysWrite(pVM,
8333 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8334 pbBuf + cbFirst,
8335 cbSecond,
8336 PGMACCESSORIGIN_IEM);
8337 if (rcStrict == VINF_SUCCESS)
8338 { /* nothing */ }
8339 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8340 {
8341 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8342 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8343 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8344 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8345 }
8346# ifndef IN_RING3
8347 else if (fPostponeFail)
8348 {
8349 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8350 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8351 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8352 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8353 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8354 return iemSetPassUpStatus(pVCpu, rcStrict);
8355 }
8356# endif
8357 else
8358 {
8359 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8360 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8361 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8362 return rcStrict;
8363 }
8364 }
8365 }
8366 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8367 {
8368 if (!cbSecond)
8369 {
8370 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8371 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8372 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8373 }
8374 else
8375 {
8376 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8377 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8378 pbBuf + cbFirst,
8379 cbSecond,
8380 PGMACCESSORIGIN_IEM);
8381 if (rcStrict2 == VINF_SUCCESS)
8382 {
8383 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8384 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8385 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8386 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8387 }
8388 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8389 {
8390 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8391 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8392 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8393 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8394 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8395 }
8396# ifndef IN_RING3
8397 else if (fPostponeFail)
8398 {
8399 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8400 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8401 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8402 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8403 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8404 return iemSetPassUpStatus(pVCpu, rcStrict);
8405 }
8406# endif
8407 else
8408 {
8409 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8410 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8411 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8412 return rcStrict2;
8413 }
8414 }
8415 }
8416# ifndef IN_RING3
8417 else if (fPostponeFail)
8418 {
8419 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8420 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8421 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8422 if (!cbSecond)
8423 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8424 else
8425 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8426 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8427 return iemSetPassUpStatus(pVCpu, rcStrict);
8428 }
8429# endif
8430 else
8431 {
8432 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8433 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8434 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8435 return rcStrict;
8436 }
8437 }
8438 else
8439 {
8440 /*
8441 * No access handlers, much simpler.
8442 */
8443 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8444 if (RT_SUCCESS(rc))
8445 {
8446 if (cbSecond)
8447 {
8448 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8449 if (RT_SUCCESS(rc))
8450 { /* likely */ }
8451 else
8452 {
8453 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8454 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8455 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8456 return rc;
8457 }
8458 }
8459 }
8460 else
8461 {
8462 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8463 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8464 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8465 return rc;
8466 }
8467 }
8468 }
8469#endif
8470
8471#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8472 /*
8473 * Record the write(s).
8474 */
8475 if (!pVCpu->iem.s.fNoRem)
8476 {
8477 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8478 if (pEvtRec)
8479 {
8480 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8481 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
8482 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8483 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
8484 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
8485 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8486 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8487 }
8488 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8489 {
8490 pEvtRec = iemVerifyAllocRecord(pVCpu);
8491 if (pEvtRec)
8492 {
8493 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8494 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
8495 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8496 memcpy(pEvtRec->u.RamWrite.ab,
8497 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
8498 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
8499 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8500 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8501 }
8502 }
8503 }
8504#endif
8505#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
8506 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8507 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8508 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8509 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8510 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8511 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8512
8513 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8514 g_cbIemWrote = cbWrote;
8515 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8516#endif
8517
8518 /*
8519 * Free the mapping entry.
8520 */
8521 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8522 Assert(pVCpu->iem.s.cActiveMappings != 0);
8523 pVCpu->iem.s.cActiveMappings--;
8524 return VINF_SUCCESS;
8525}
8526
8527
8528/**
8529 * iemMemMap worker that deals with a request crossing pages.
8530 */
8531IEM_STATIC VBOXSTRICTRC
8532iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8533{
8534 /*
8535 * Do the address translations.
8536 */
8537 RTGCPHYS GCPhysFirst;
8538 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8539 if (rcStrict != VINF_SUCCESS)
8540 return rcStrict;
8541
8542 RTGCPHYS GCPhysSecond;
8543 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8544 fAccess, &GCPhysSecond);
8545 if (rcStrict != VINF_SUCCESS)
8546 return rcStrict;
8547 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8548
8549 PVM pVM = pVCpu->CTX_SUFF(pVM);
8550#ifdef IEM_VERIFICATION_MODE_FULL
8551 /*
8552 * Detect problematic memory when verifying so we can select
8553 * the right execution engine. (TLB: Redo this.)
8554 */
8555 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8556 {
8557 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8558 if (RT_SUCCESS(rc2))
8559 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8560 if (RT_FAILURE(rc2))
8561 pVCpu->iem.s.fProblematicMemory = true;
8562 }
8563#endif
8564
8565
8566 /*
8567 * Read in the current memory content if it's a read, execute or partial
8568 * write access.
8569 */
8570 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8571 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8572 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8573
8574 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8575 {
8576 if (!pVCpu->iem.s.fBypassHandlers)
8577 {
8578 /*
8579 * Must carefully deal with access handler status codes here,
8580 * which makes the code a bit bloated.
8581 */
8582 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8583 if (rcStrict == VINF_SUCCESS)
8584 {
8585 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8586 if (rcStrict == VINF_SUCCESS)
8587 { /*likely */ }
8588 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8589 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8590 else
8591 {
8592 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
8593 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8594 return rcStrict;
8595 }
8596 }
8597 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8598 {
8599 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8600 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8601 {
8602 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8603 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8604 }
8605 else
8606 {
8607 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8608 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8609 return rcStrict2;
8610 }
8611 }
8612 else
8613 {
8614 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8615 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8616 return rcStrict;
8617 }
8618 }
8619 else
8620 {
8621 /*
8622 * No informational status codes here, much more straightforward.
8623 */
8624 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8625 if (RT_SUCCESS(rc))
8626 {
8627 Assert(rc == VINF_SUCCESS);
8628 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8629 if (RT_SUCCESS(rc))
8630 Assert(rc == VINF_SUCCESS);
8631 else
8632 {
8633 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8634 return rc;
8635 }
8636 }
8637 else
8638 {
8639 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8640 return rc;
8641 }
8642 }
8643
8644#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8645 if ( !pVCpu->iem.s.fNoRem
8646 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8647 {
8648 /*
8649 * Record the reads.
8650 */
8651 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8652 if (pEvtRec)
8653 {
8654 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8655 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8656 pEvtRec->u.RamRead.cb = cbFirstPage;
8657 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8658 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8659 }
8660 pEvtRec = iemVerifyAllocRecord(pVCpu);
8661 if (pEvtRec)
8662 {
8663 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8664 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8665 pEvtRec->u.RamRead.cb = cbSecondPage;
8666 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8667 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8668 }
8669 }
8670#endif
8671 }
8672#ifdef VBOX_STRICT
8673 else
8674 memset(pbBuf, 0xcc, cbMem);
8675 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8676 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8677#endif
8678
8679 /*
8680 * Commit the bounce buffer entry.
8681 */
8682 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8683 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8684 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8685 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8686 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8687 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8688 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8689 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8690 pVCpu->iem.s.cActiveMappings++;
8691
8692 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8693 *ppvMem = pbBuf;
8694 return VINF_SUCCESS;
8695}
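/*
 * Worked example (not part of the original source): for a 4-byte access whose
 * page offset is 0xffe (4 KiB pages), the split computed above is
 * cbFirstPage = PAGE_SIZE - 0xffe = 2 and cbSecondPage = cbMem - cbFirstPage = 2,
 * i.e. two bytes are bounced from the end of the first page and two from the
 * start of the second.
 */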
8696
8697
8698/**
8699 * iemMemMap worker that deals with iemMemPageMap failures.
8700 */
8701IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8702 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8703{
8704 /*
8705 * Filter out conditions we can handle and the ones which shouldn't happen.
8706 */
8707 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8708 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8709 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8710 {
8711 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8712 return rcMap;
8713 }
8714 pVCpu->iem.s.cPotentialExits++;
8715
8716 /*
8717 * Read in the current memory content if it's a read, execute or partial
8718 * write access.
8719 */
8720 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8721 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8722 {
8723 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8724 memset(pbBuf, 0xff, cbMem);
8725 else
8726 {
8727 int rc;
8728 if (!pVCpu->iem.s.fBypassHandlers)
8729 {
8730 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8731 if (rcStrict == VINF_SUCCESS)
8732 { /* nothing */ }
8733 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8734 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8735 else
8736 {
8737 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8738 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8739 return rcStrict;
8740 }
8741 }
8742 else
8743 {
8744 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8745 if (RT_SUCCESS(rc))
8746 { /* likely */ }
8747 else
8748 {
8749 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
8750 GCPhysFirst, rc));
8751 return rc;
8752 }
8753 }
8754 }
8755
8756#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8757 if ( !pVCpu->iem.s.fNoRem
8758 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8759 {
8760 /*
8761 * Record the read.
8762 */
8763 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8764 if (pEvtRec)
8765 {
8766 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8767 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8768 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8769 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8770 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8771 }
8772 }
8773#endif
8774 }
8775#ifdef VBOX_STRICT
8776 else
8777 memset(pbBuf, 0xcc, cbMem);
8778#endif
8779#ifdef VBOX_STRICT
8780 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8781 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8782#endif
8783
8784 /*
8785 * Commit the bounce buffer entry.
8786 */
8787 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8788 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8789 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8790 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8791 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8792 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8793 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8794 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8795 pVCpu->iem.s.cActiveMappings++;
8796
8797 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8798 *ppvMem = pbBuf;
8799 return VINF_SUCCESS;
8800}
8801
8802
8803
8804/**
8805 * Maps the specified guest memory for the given kind of access.
8806 *
8807 * This may be using bounce buffering of the memory if it's crossing a page
8808 * boundary or if there is an access handler installed for any of it. Because
8809 * of lock prefix guarantees, we're in for some extra clutter when this
8810 * happens.
8811 *
8812 * This may raise a \#GP, \#SS, \#PF or \#AC.
8813 *
8814 * @returns VBox strict status code.
8815 *
8816 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8817 * @param ppvMem Where to return the pointer to the mapped
8818 * memory.
8819 * @param cbMem The number of bytes to map. This is usually 1,
8820 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8821 * string operations it can be up to a page.
8822 * @param iSegReg The index of the segment register to use for
8823 * this access. The base and limits are checked.
8824 * Use UINT8_MAX to indicate that no segmentation
8825 * is required (for IDT, GDT and LDT accesses).
8826 * @param GCPtrMem The address of the guest memory.
8827 * @param fAccess How the memory is being accessed. The
8828 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8829 * how to map the memory, while the
8830 * IEM_ACCESS_WHAT_XXX bit is used when raising
8831 * exceptions.
8832 */
8833IEM_STATIC VBOXSTRICTRC
8834iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8835{
8836 /*
8837 * Check the input and figure out which mapping entry to use.
8838 */
8839 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8840 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8841 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8842
8843 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8844 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8845 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8846 {
8847 iMemMap = iemMemMapFindFree(pVCpu);
8848 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8849 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8850 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8851 pVCpu->iem.s.aMemMappings[2].fAccess),
8852 VERR_IEM_IPE_9);
8853 }
8854
8855 /*
8856 * Map the memory, checking that we can actually access it. If something
8857 * slightly complicated happens, fall back on bounce buffering.
8858 */
8859 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8860 if (rcStrict != VINF_SUCCESS)
8861 return rcStrict;
8862
8863 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8864 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8865
8866 RTGCPHYS GCPhysFirst;
8867 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8868 if (rcStrict != VINF_SUCCESS)
8869 return rcStrict;
8870
8871 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8872 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8873 if (fAccess & IEM_ACCESS_TYPE_READ)
8874 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8875
8876 void *pvMem;
8877 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8878 if (rcStrict != VINF_SUCCESS)
8879 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8880
8881 /*
8882 * Fill in the mapping table entry.
8883 */
8884 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8885 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8886 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8887 pVCpu->iem.s.cActiveMappings++;
8888
8889 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8890 *ppvMem = pvMem;
8891 return VINF_SUCCESS;
8892}
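/*
 * Illustrative usage sketch (not part of the original source; u16Value,
 * iSegReg and GCPtrMem stand in for the caller's values): a word-sized guest
 * store follows the same map / access / commit pattern that the data fetch
 * helpers further down use for reads:
 *
 *     uint16_t    *pu16Dst;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst),
 *                                       iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         *pu16Dst = u16Value;
 *         rcStrict = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
 *     }
 */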
8893
8894
8895/**
8896 * Commits the guest memory if bounce buffered and unmaps it.
8897 *
8898 * @returns Strict VBox status code.
8899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8900 * @param pvMem The mapping.
8901 * @param fAccess The kind of access.
8902 */
8903IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8904{
8905 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8906 AssertReturn(iMemMap >= 0, iMemMap);
8907
8908 /* If it's bounce buffered, we may need to write back the buffer. */
8909 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8910 {
8911 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8912 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8913 }
8914 /* Otherwise unlock it. */
8915 else
8916 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8917
8918 /* Free the entry. */
8919 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8920 Assert(pVCpu->iem.s.cActiveMappings != 0);
8921 pVCpu->iem.s.cActiveMappings--;
8922 return VINF_SUCCESS;
8923}
8924
8925#ifdef IEM_WITH_SETJMP
8926
8927/**
8928 * Maps the specified guest memory for the given kind of access, longjmp on
8929 * error.
8930 *
8931 * This may be using bounce buffering of the memory if it's crossing a page
8932 * boundary or if there is an access handler installed for any of it. Because
8933 * of lock prefix guarantees, we're in for some extra clutter when this
8934 * happens.
8935 *
8936 * This may raise a \#GP, \#SS, \#PF or \#AC.
8937 *
8938 * @returns Pointer to the mapped memory.
8939 *
8940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8941 * @param cbMem The number of bytes to map. This is usually 1,
8942 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8943 * string operations it can be up to a page.
8944 * @param iSegReg The index of the segment register to use for
8945 * this access. The base and limits are checked.
8946 * Use UINT8_MAX to indicate that no segmentation
8947 * is required (for IDT, GDT and LDT accesses).
8948 * @param GCPtrMem The address of the guest memory.
8949 * @param fAccess How the memory is being accessed. The
8950 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8951 * how to map the memory, while the
8952 * IEM_ACCESS_WHAT_XXX bit is used when raising
8953 * exceptions.
8954 */
8955IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8956{
8957 /*
8958 * Check the input and figure out which mapping entry to use.
8959 */
8960 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8961 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8962 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8963
8964 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8965 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8966 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8967 {
8968 iMemMap = iemMemMapFindFree(pVCpu);
8969 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8970 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8971 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8972 pVCpu->iem.s.aMemMappings[2].fAccess),
8973 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8974 }
8975
8976 /*
8977 * Map the memory, checking that we can actually access it. If something
8978 * slightly complicated happens, fall back on bounce buffering.
8979 */
8980 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8981 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8982 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8983
8984 /* Crossing a page boundary? */
8985 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8986 { /* No (likely). */ }
8987 else
8988 {
8989 void *pvMem;
8990 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8991 if (rcStrict == VINF_SUCCESS)
8992 return pvMem;
8993 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8994 }
8995
8996 RTGCPHYS GCPhysFirst;
8997 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8998 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8999 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9000
9001 if (fAccess & IEM_ACCESS_TYPE_WRITE)
9002 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9003 if (fAccess & IEM_ACCESS_TYPE_READ)
9004 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9005
9006 void *pvMem;
9007 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9008 if (rcStrict == VINF_SUCCESS)
9009 { /* likely */ }
9010 else
9011 {
9012 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9013 if (rcStrict == VINF_SUCCESS)
9014 return pvMem;
9015 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9016 }
9017
9018 /*
9019 * Fill in the mapping table entry.
9020 */
9021 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9022 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9023 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9024 pVCpu->iem.s.cActiveMappings++;
9025
9026 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9027 return pvMem;
9028}
9029
9030
9031/**
9032 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9033 *
9034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9035 * @param pvMem The mapping.
9036 * @param fAccess The kind of access.
9037 */
9038IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9039{
9040 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9041 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9042
9043 /* If it's bounce buffered, we may need to write back the buffer. */
9044 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9045 {
9046 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9047 {
9048 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9049 if (rcStrict == VINF_SUCCESS)
9050 return;
9051 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9052 }
9053 }
9054 /* Otherwise unlock it. */
9055 else
9056 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9057
9058 /* Free the entry. */
9059 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9060 Assert(pVCpu->iem.s.cActiveMappings != 0);
9061 pVCpu->iem.s.cActiveMappings--;
9062}
9063
9064#endif
9065
9066#ifndef IN_RING3
9067/**
9068 * Commits the guest memory if bounce buffered and unmaps it, if any bounce
9069 * buffer part shows trouble it will be postponed to ring-3 (sets FF and stuff).
9070 *
9071 * Allows the instruction to be completed and retired, while the IEM user will
9072 * return to ring-3 immediately afterwards and do the postponed writes there.
9073 *
9074 * @returns VBox status code (no strict statuses). Caller must check
9075 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9076 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9077 * @param pvMem The mapping.
9078 * @param fAccess The kind of access.
9079 */
9080IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9081{
9082 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9083 AssertReturn(iMemMap >= 0, iMemMap);
9084
9085 /* If it's bounce buffered, we may need to write back the buffer. */
9086 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9087 {
9088 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9089 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9090 }
9091 /* Otherwise unlock it. */
9092 else
9093 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9094
9095 /* Free the entry. */
9096 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9097 Assert(pVCpu->iem.s.cActiveMappings != 0);
9098 pVCpu->iem.s.cActiveMappings--;
9099 return VINF_SUCCESS;
9100}
9101#endif
9102
9103
9104/**
9105 * Rolls back mappings, releasing page locks and such.
9106 *
9107 * The caller shall only call this after checking cActiveMappings.
9108 *
9110 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9111 */
9112IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9113{
9114 Assert(pVCpu->iem.s.cActiveMappings > 0);
9115
9116 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9117 while (iMemMap-- > 0)
9118 {
9119 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9120 if (fAccess != IEM_ACCESS_INVALID)
9121 {
9122 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9123 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9124 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9125 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9126 Assert(pVCpu->iem.s.cActiveMappings > 0);
9127 pVCpu->iem.s.cActiveMappings--;
9128 }
9129 }
9130}
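/*
 * Illustrative sketch (not part of the original source): as the note above
 * requires, a caller only invokes the rollback when an instruction failed
 * with mappings still outstanding:
 *
 *     if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
 *         iemMemRollback(pVCpu);
 */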
9131
9132
9133/**
9134 * Fetches a data byte.
9135 *
9136 * @returns Strict VBox status code.
9137 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9138 * @param pu8Dst Where to return the byte.
9139 * @param iSegReg The index of the segment register to use for
9140 * this access. The base and limits are checked.
9141 * @param GCPtrMem The address of the guest memory.
9142 */
9143IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9144{
9145 /* The lazy approach for now... */
9146 uint8_t const *pu8Src;
9147 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9148 if (rc == VINF_SUCCESS)
9149 {
9150 *pu8Dst = *pu8Src;
9151 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9152 }
9153 return rc;
9154}
9155
9156
9157#ifdef IEM_WITH_SETJMP
9158/**
9159 * Fetches a data byte, longjmp on error.
9160 *
9161 * @returns The byte.
9162 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9163 * @param iSegReg The index of the segment register to use for
9164 * this access. The base and limits are checked.
9165 * @param GCPtrMem The address of the guest memory.
9166 */
9167DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9168{
9169 /* The lazy approach for now... */
9170 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9171 uint8_t const bRet = *pu8Src;
9172 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9173 return bRet;
9174}
9175#endif /* IEM_WITH_SETJMP */
9176
9177
9178/**
9179 * Fetches a data word.
9180 *
9181 * @returns Strict VBox status code.
9182 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9183 * @param pu16Dst Where to return the word.
9184 * @param iSegReg The index of the segment register to use for
9185 * this access. The base and limits are checked.
9186 * @param GCPtrMem The address of the guest memory.
9187 */
9188IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9189{
9190 /* The lazy approach for now... */
9191 uint16_t const *pu16Src;
9192 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9193 if (rc == VINF_SUCCESS)
9194 {
9195 *pu16Dst = *pu16Src;
9196 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9197 }
9198 return rc;
9199}
9200
9201
9202#ifdef IEM_WITH_SETJMP
9203/**
9204 * Fetches a data word, longjmp on error.
9205 *
9206 * @returns The word
9207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9208 * @param iSegReg The index of the segment register to use for
9209 * this access. The base and limits are checked.
9210 * @param GCPtrMem The address of the guest memory.
9211 */
9212DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9213{
9214 /* The lazy approach for now... */
9215 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9216 uint16_t const u16Ret = *pu16Src;
9217 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9218 return u16Ret;
9219}
9220#endif
9221
9222
9223/**
9224 * Fetches a data dword.
9225 *
9226 * @returns Strict VBox status code.
9227 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9228 * @param pu32Dst Where to return the dword.
9229 * @param iSegReg The index of the segment register to use for
9230 * this access. The base and limits are checked.
9231 * @param GCPtrMem The address of the guest memory.
9232 */
9233IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9234{
9235 /* The lazy approach for now... */
9236 uint32_t const *pu32Src;
9237 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9238 if (rc == VINF_SUCCESS)
9239 {
9240 *pu32Dst = *pu32Src;
9241 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9242 }
9243 return rc;
9244}
9245
9246
9247#ifdef IEM_WITH_SETJMP
9248
9249IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9250{
9251 Assert(cbMem >= 1);
9252 Assert(iSegReg < X86_SREG_COUNT);
9253
9254 /*
9255 * 64-bit mode is simpler.
9256 */
9257 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9258 {
9259 if (iSegReg >= X86_SREG_FS)
9260 {
9261 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9262 GCPtrMem += pSel->u64Base;
9263 }
9264
9265 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9266 return GCPtrMem;
9267 }
9268 /*
9269 * 16-bit and 32-bit segmentation.
9270 */
9271 else
9272 {
9273 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9274 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9275 == X86DESCATTR_P /* data, expand up */
9276 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9277 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9278 {
9279 /* expand up */
9280 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
9281 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
9282 && GCPtrLast32 >= (uint32_t)GCPtrMem))
9283 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9284 }
9285 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9286 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9287 {
9288 /* expand down */
9289 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
9290 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9291 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9292 && GCPtrLast32 >= (uint32_t)GCPtrMem))
9293 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9294 }
9295 else
9296 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9297 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9298 }
9299 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9300}
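/*
 * Worked example (not part of the original source): for an expand-up data
 * segment with limit 0x1fff, a 4-byte read at offset 0x1ffc is the last one
 * that fits (its final byte lands on 0x1fff), while the same read at offset
 * 0x1ffd reaches 0x2000 and takes the iemRaiseSelectorBoundsJmp path above.
 */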
9301
9302
9303IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9304{
9305 Assert(cbMem >= 1);
9306 Assert(iSegReg < X86_SREG_COUNT);
9307
9308 /*
9309 * 64-bit mode is simpler.
9310 */
9311 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9312 {
9313 if (iSegReg >= X86_SREG_FS)
9314 {
9315 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9316 GCPtrMem += pSel->u64Base;
9317 }
9318
9319 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9320 return GCPtrMem;
9321 }
9322 /*
9323 * 16-bit and 32-bit segmentation.
9324 */
9325 else
9326 {
9327 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9328 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9329 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9330 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9331 {
9332 /* expand up */
9333 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
9334 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
9335 && GCPtrLast32 >= (uint32_t)GCPtrMem))
9336 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9337 }
9338 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9339 {
9340 /* expand down */
9341 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
9342 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9343 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9344 && GCPtrLast32 >= (uint32_t)GCPtrMem))
9345 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9346 }
9347 else
9348 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9349 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9350 }
9351 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9352}
9353
9354
9355/**
9356 * Fetches a data dword, longjmp on error, fallback/safe version.
9357 *
9358 * @returns The dword
9359 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9360 * @param iSegReg The index of the segment register to use for
9361 * this access. The base and limits are checked.
9362 * @param GCPtrMem The address of the guest memory.
9363 */
9364IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9365{
9366 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9367 uint32_t const u32Ret = *pu32Src;
9368 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9369 return u32Ret;
9370}
9371
9372
9373/**
9374 * Fetches a data dword, longjmp on error.
9375 *
9376 * @returns The dword
9377 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9378 * @param iSegReg The index of the segment register to use for
9379 * this access. The base and limits are checked.
9380 * @param GCPtrMem The address of the guest memory.
9381 */
9382DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9383{
9384# ifdef IEM_WITH_DATA_TLB
9385 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9386 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9387 {
9388 /// @todo more later.
9389 }
9390
9391 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9392# else
9393 /* The lazy approach. */
9394 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9395 uint32_t const u32Ret = *pu32Src;
9396 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9397 return u32Ret;
9398# endif
9399}
9400#endif
9401
9402
9403#ifdef SOME_UNUSED_FUNCTION
9404/**
9405 * Fetches a data dword and sign extends it to a qword.
9406 *
9407 * @returns Strict VBox status code.
9408 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9409 * @param pu64Dst Where to return the sign extended value.
9410 * @param iSegReg The index of the segment register to use for
9411 * this access. The base and limits are checked.
9412 * @param GCPtrMem The address of the guest memory.
9413 */
9414IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9415{
9416 /* The lazy approach for now... */
9417 int32_t const *pi32Src;
9418 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9419 if (rc == VINF_SUCCESS)
9420 {
9421 *pu64Dst = *pi32Src;
9422 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9423 }
9424#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9425 else
9426 *pu64Dst = 0;
9427#endif
9428 return rc;
9429}
9430#endif
9431
9432
9433/**
9434 * Fetches a data qword.
9435 *
9436 * @returns Strict VBox status code.
9437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9438 * @param pu64Dst Where to return the qword.
9439 * @param iSegReg The index of the segment register to use for
9440 * this access. The base and limits are checked.
9441 * @param GCPtrMem The address of the guest memory.
9442 */
9443IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9444{
9445 /* The lazy approach for now... */
9446 uint64_t const *pu64Src;
9447 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9448 if (rc == VINF_SUCCESS)
9449 {
9450 *pu64Dst = *pu64Src;
9451 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9452 }
9453 return rc;
9454}
9455
9456
9457#ifdef IEM_WITH_SETJMP
9458/**
9459 * Fetches a data qword, longjmp on error.
9460 *
9461 * @returns The qword.
9462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9463 * @param iSegReg The index of the segment register to use for
9464 * this access. The base and limits are checked.
9465 * @param GCPtrMem The address of the guest memory.
9466 */
9467DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9468{
9469 /* The lazy approach for now... */
9470 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9471 uint64_t const u64Ret = *pu64Src;
9472 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9473 return u64Ret;
9474}
9475#endif
9476
9477
9478/**
9479 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9480 *
9481 * @returns Strict VBox status code.
9482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9483 * @param pu64Dst Where to return the qword.
9484 * @param iSegReg The index of the segment register to use for
9485 * this access. The base and limits are checked.
9486 * @param GCPtrMem The address of the guest memory.
9487 */
9488IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9489{
9490 /* The lazy approach for now... */
9491 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9492 if (RT_UNLIKELY(GCPtrMem & 15))
9493 return iemRaiseGeneralProtectionFault0(pVCpu);
9494
9495 uint64_t const *pu64Src;
9496 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9497 if (rc == VINF_SUCCESS)
9498 {
9499 *pu64Dst = *pu64Src;
9500 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9501 }
9502 return rc;
9503}
9504
9505
9506#ifdef IEM_WITH_SETJMP
9507/**
9508 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9509 *
9510 * @returns The qword.
9511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9512 * @param iSegReg The index of the segment register to use for
9513 * this access. The base and limits are checked.
9514 * @param GCPtrMem The address of the guest memory.
9515 */
9516DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9517{
9518 /* The lazy approach for now... */
9519 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9520 if (RT_LIKELY(!(GCPtrMem & 15)))
9521 {
9522 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9523 uint64_t const u64Ret = *pu64Src;
9524 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9525 return u64Ret;
9526 }
9527
9528 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9529 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9530}
9531#endif
9532
9533
9534/**
9535 * Fetches a data tword.
9536 *
9537 * @returns Strict VBox status code.
9538 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9539 * @param pr80Dst Where to return the tword.
9540 * @param iSegReg The index of the segment register to use for
9541 * this access. The base and limits are checked.
9542 * @param GCPtrMem The address of the guest memory.
9543 */
9544IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9545{
9546 /* The lazy approach for now... */
9547 PCRTFLOAT80U pr80Src;
9548 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9549 if (rc == VINF_SUCCESS)
9550 {
9551 *pr80Dst = *pr80Src;
9552 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9553 }
9554 return rc;
9555}
9556
9557
9558#ifdef IEM_WITH_SETJMP
9559/**
9560 * Fetches a data tword, longjmp on error.
9561 *
9562 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9563 * @param pr80Dst Where to return the tword.
9564 * @param iSegReg The index of the segment register to use for
9565 * this access. The base and limits are checked.
9566 * @param GCPtrMem The address of the guest memory.
9567 */
9568DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9569{
9570 /* The lazy approach for now... */
9571 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9572 *pr80Dst = *pr80Src;
9573 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9574}
9575#endif
9576
9577
9578/**
9579 * Fetches a data dqword (double qword), generally SSE related.
9580 *
9581 * @returns Strict VBox status code.
9582 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9583 * @param pu128Dst Where to return the dqword.
9584 * @param iSegReg The index of the segment register to use for
9585 * this access. The base and limits are checked.
9586 * @param GCPtrMem The address of the guest memory.
9587 */
9588IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9589{
9590 /* The lazy approach for now... */
9591 PCRTUINT128U pu128Src;
9592 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9593 if (rc == VINF_SUCCESS)
9594 {
9595 pu128Dst->au64[0] = pu128Src->au64[0];
9596 pu128Dst->au64[1] = pu128Src->au64[1];
9597 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9598 }
9599 return rc;
9600}
9601
9602
9603#ifdef IEM_WITH_SETJMP
9604/**
9605 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9606 *
9607 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9608 * @param pu128Dst Where to return the dqword.
9609 * @param iSegReg The index of the segment register to use for
9610 * this access. The base and limits are checked.
9611 * @param GCPtrMem The address of the guest memory.
9612 */
9613IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9614{
9615 /* The lazy approach for now... */
9616 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9617 pu128Dst->au64[0] = pu128Src->au64[0];
9618 pu128Dst->au64[1] = pu128Src->au64[1];
9619 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9620}
9621#endif
9622
9623
9624/**
9625 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9626 * related.
9627 *
9628 * Raises \#GP(0) if not aligned.
9629 *
9630 * @returns Strict VBox status code.
9631 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9632 * @param pu128Dst Where to return the dqword.
9633 * @param iSegReg The index of the segment register to use for
9634 * this access. The base and limits are checked.
9635 * @param GCPtrMem The address of the guest memory.
9636 */
9637IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9638{
9639 /* The lazy approach for now... */
9640 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9641 if ( (GCPtrMem & 15)
9642 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9643 return iemRaiseGeneralProtectionFault0(pVCpu);
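 /* Editorial note (not from the original source): X86_MXCSR_MM is understood to
    be AMD's "misaligned SSE mode" bit; when the guest has set it, misaligned
    16 byte SSE accesses are tolerated, which is why the #GP(0) raised above is
    skipped in that case. */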
9644
9645 PCRTUINT128U pu128Src;
9646 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9647 if (rc == VINF_SUCCESS)
9648 {
9649 pu128Dst->au64[0] = pu128Src->au64[0];
9650 pu128Dst->au64[1] = pu128Src->au64[1];
9651 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9652 }
9653 return rc;
9654}
9655
9656
9657#ifdef IEM_WITH_SETJMP
9658/**
9659 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9660 * related, longjmp on error.
9661 *
9662 * Raises \#GP(0) if not aligned.
9663 *
9664 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9665 * @param pu128Dst Where to return the dqword.
9666 * @param iSegReg The index of the segment register to use for
9667 * this access. The base and limits are checked.
9668 * @param GCPtrMem The address of the guest memory.
9669 */
9670DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9671{
9672 /* The lazy approach for now... */
9673 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9674 if ( (GCPtrMem & 15) == 0
9675 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9676 {
9677 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9678 pu128Dst->au64[0] = pu128Src->au64[0];
9679 pu128Dst->au64[1] = pu128Src->au64[1];
9680 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9681 return;
9682 }
9683
9684 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9685 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9686}
9687#endif
9688
9689
9690/**
9691 * Fetches a data oword (octo word), generally AVX related.
9692 *
9693 * @returns Strict VBox status code.
9694 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9695 * @param pu256Dst Where to return the oword.
9696 * @param iSegReg The index of the segment register to use for
9697 * this access. The base and limits are checked.
9698 * @param GCPtrMem The address of the guest memory.
9699 */
9700IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9701{
9702 /* The lazy approach for now... */
9703 PCRTUINT256U pu256Src;
9704 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9705 if (rc == VINF_SUCCESS)
9706 {
9707 pu256Dst->au64[0] = pu256Src->au64[0];
9708 pu256Dst->au64[1] = pu256Src->au64[1];
9709 pu256Dst->au64[2] = pu256Src->au64[2];
9710 pu256Dst->au64[3] = pu256Src->au64[3];
9711 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9712 }
9713 return rc;
9714}
9715
9716
9717#ifdef IEM_WITH_SETJMP
9718/**
9719 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9720 *
9721 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9722 * @param pu256Dst Where to return the oword.
9723 * @param iSegReg The index of the segment register to use for
9724 * this access. The base and limits are checked.
9725 * @param GCPtrMem The address of the guest memory.
9726 */
9727IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9728{
9729 /* The lazy approach for now... */
9730 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9731 pu256Dst->au64[0] = pu256Src->au64[0];
9732 pu256Dst->au64[1] = pu256Src->au64[1];
9733 pu256Dst->au64[2] = pu256Src->au64[2];
9734 pu256Dst->au64[3] = pu256Src->au64[3];
9735 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9736}
9737#endif
9738
9739
9740/**
9741 * Fetches a data oword (octo word) at an aligned address, generally AVX
9742 * related.
9743 *
9744 * Raises \#GP(0) if not aligned.
9745 *
9746 * @returns Strict VBox status code.
9747 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9748 * @param pu256Dst Where to return the oword.
9749 * @param iSegReg The index of the segment register to use for
9750 * this access. The base and limits are checked.
9751 * @param GCPtrMem The address of the guest memory.
9752 */
9753IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9754{
9755 /* The lazy approach for now... */
9756 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9757 if (GCPtrMem & 31)
9758 return iemRaiseGeneralProtectionFault0(pVCpu);
9759
9760 PCRTUINT256U pu256Src;
9761 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9762 if (rc == VINF_SUCCESS)
9763 {
9764 pu256Dst->au64[0] = pu256Src->au64[0];
9765 pu256Dst->au64[1] = pu256Src->au64[1];
9766 pu256Dst->au64[2] = pu256Src->au64[2];
9767 pu256Dst->au64[3] = pu256Src->au64[3];
9768 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9769 }
9770 return rc;
9771}
9772
9773
9774#ifdef IEM_WITH_SETJMP
9775/**
9776 * Fetches a data oword (octo word) at an aligned address, generally AVX
9777 * related, longjmp on error.
9778 *
9779 * Raises \#GP(0) if not aligned.
9780 *
9781 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9782 * @param pu256Dst Where to return the oword.
9783 * @param iSegReg The index of the segment register to use for
9784 * this access. The base and limits are checked.
9785 * @param GCPtrMem The address of the guest memory.
9786 */
9787DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9788{
9789 /* The lazy approach for now... */
9790 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9791 if ((GCPtrMem & 31) == 0)
9792 {
9793 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9794 pu256Dst->au64[0] = pu256Src->au64[0];
9795 pu256Dst->au64[1] = pu256Src->au64[1];
9796 pu256Dst->au64[2] = pu256Src->au64[2];
9797 pu256Dst->au64[3] = pu256Src->au64[3];
9798 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9799 return;
9800 }
9801
9802 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9803 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9804}
9805#endif
9806
9807
9808
9809/**
9810 * Fetches a descriptor register (lgdt, lidt).
9811 *
9812 * @returns Strict VBox status code.
9813 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9814 * @param pcbLimit Where to return the limit.
9815 * @param pGCPtrBase Where to return the base.
9816 * @param iSegReg The index of the segment register to use for
9817 * this access. The base and limits are checked.
9818 * @param GCPtrMem The address of the guest memory.
9819 * @param enmOpSize The effective operand size.
9820 */
9821IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9822 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9823{
9824 /*
9825 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9826 * little special:
9827 * - The two reads are done separately.
9828 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
9829 * - We suspect the 386 to actually commit the limit before the base in
9830 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9831 * don't try to emulate this eccentric behavior, because it's not well
9832 * enough understood and is rather hard to trigger.
9833 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9834 */
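 /*
  * Editorial sketch (not from the original source): the operand read below is
  * the usual 6/10 byte pseudo-descriptor, fetched as two separate accesses:
  *
  *      GCPtrMem + 0:  16-bit limit
  *      GCPtrMem + 2:  base - 32 bits with the top byte masked off for 16-bit
  *                     operand size, 32 bits for 32-bit operand size, and
  *                     64 bits in long mode.
  */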
9835 VBOXSTRICTRC rcStrict;
9836 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9837 {
9838 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9839 if (rcStrict == VINF_SUCCESS)
9840 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9841 }
9842 else
9843 {
9844 uint32_t uTmp = 0; /* (Visual C++ may otherwise warn that it is used uninitialized) */
9845 if (enmOpSize == IEMMODE_32BIT)
9846 {
9847 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9848 {
9849 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9850 if (rcStrict == VINF_SUCCESS)
9851 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9852 }
9853 else
9854 {
9855 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9856 if (rcStrict == VINF_SUCCESS)
9857 {
9858 *pcbLimit = (uint16_t)uTmp;
9859 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9860 }
9861 }
9862 if (rcStrict == VINF_SUCCESS)
9863 *pGCPtrBase = uTmp;
9864 }
9865 else
9866 {
9867 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9868 if (rcStrict == VINF_SUCCESS)
9869 {
9870 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9871 if (rcStrict == VINF_SUCCESS)
9872 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9873 }
9874 }
9875 }
9876 return rcStrict;
9877}
9878
9879
9880
9881/**
9882 * Stores a data byte.
9883 *
9884 * @returns Strict VBox status code.
9885 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9886 * @param iSegReg The index of the segment register to use for
9887 * this access. The base and limits are checked.
9888 * @param GCPtrMem The address of the guest memory.
9889 * @param u8Value The value to store.
9890 */
9891IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9892{
9893 /* The lazy approach for now... */
9894 uint8_t *pu8Dst;
9895 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9896 if (rc == VINF_SUCCESS)
9897 {
9898 *pu8Dst = u8Value;
9899 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9900 }
9901 return rc;
9902}
9903
9904
9905#ifdef IEM_WITH_SETJMP
9906/**
9907 * Stores a data byte, longjmp on error.
9908 *
9909 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9910 * @param iSegReg The index of the segment register to use for
9911 * this access. The base and limits are checked.
9912 * @param GCPtrMem The address of the guest memory.
9913 * @param u8Value The value to store.
9914 */
9915IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9916{
9917 /* The lazy approach for now... */
9918 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9919 *pu8Dst = u8Value;
9920 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9921}
9922#endif
9923
9924
9925/**
9926 * Stores a data word.
9927 *
9928 * @returns Strict VBox status code.
9929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9930 * @param iSegReg The index of the segment register to use for
9931 * this access. The base and limits are checked.
9932 * @param GCPtrMem The address of the guest memory.
9933 * @param u16Value The value to store.
9934 */
9935IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9936{
9937 /* The lazy approach for now... */
9938 uint16_t *pu16Dst;
9939 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9940 if (rc == VINF_SUCCESS)
9941 {
9942 *pu16Dst = u16Value;
9943 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9944 }
9945 return rc;
9946}
9947
9948
9949#ifdef IEM_WITH_SETJMP
9950/**
9951 * Stores a data word, longjmp on error.
9952 *
9953 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9954 * @param iSegReg The index of the segment register to use for
9955 * this access. The base and limits are checked.
9956 * @param GCPtrMem The address of the guest memory.
9957 * @param u16Value The value to store.
9958 */
9959IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9960{
9961 /* The lazy approach for now... */
9962 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9963 *pu16Dst = u16Value;
9964 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9965}
9966#endif
9967
9968
9969/**
9970 * Stores a data dword.
9971 *
9972 * @returns Strict VBox status code.
9973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9974 * @param iSegReg The index of the segment register to use for
9975 * this access. The base and limits are checked.
9976 * @param GCPtrMem The address of the guest memory.
9977 * @param u32Value The value to store.
9978 */
9979IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9980{
9981 /* The lazy approach for now... */
9982 uint32_t *pu32Dst;
9983 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9984 if (rc == VINF_SUCCESS)
9985 {
9986 *pu32Dst = u32Value;
9987 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9988 }
9989 return rc;
9990}
9991
9992
9993#ifdef IEM_WITH_SETJMP
9994/**
9995 * Stores a data dword, longjmp on error.
9996 *
9998 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9999 * @param iSegReg The index of the segment register to use for
10000 * this access. The base and limits are checked.
10001 * @param GCPtrMem The address of the guest memory.
10002 * @param u32Value The value to store.
10003 */
10004IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10005{
10006 /* The lazy approach for now... */
10007 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10008 *pu32Dst = u32Value;
10009 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10010}
10011#endif
10012
10013
10014/**
10015 * Stores a data qword.
10016 *
10017 * @returns Strict VBox status code.
10018 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10019 * @param iSegReg The index of the segment register to use for
10020 * this access. The base and limits are checked.
10021 * @param GCPtrMem The address of the guest memory.
10022 * @param u64Value The value to store.
10023 */
10024IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10025{
10026 /* The lazy approach for now... */
10027 uint64_t *pu64Dst;
10028 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10029 if (rc == VINF_SUCCESS)
10030 {
10031 *pu64Dst = u64Value;
10032 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10033 }
10034 return rc;
10035}
10036
10037
10038#ifdef IEM_WITH_SETJMP
10039/**
10040 * Stores a data qword, longjmp on error.
10041 *
10042 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10043 * @param iSegReg The index of the segment register to use for
10044 * this access. The base and limits are checked.
10045 * @param GCPtrMem The address of the guest memory.
10046 * @param u64Value The value to store.
10047 */
10048IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10049{
10050 /* The lazy approach for now... */
10051 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10052 *pu64Dst = u64Value;
10053 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10054}
10055#endif
10056
10057
10058/**
10059 * Stores a data dqword.
10060 *
10061 * @returns Strict VBox status code.
10062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10063 * @param iSegReg The index of the segment register to use for
10064 * this access. The base and limits are checked.
10065 * @param GCPtrMem The address of the guest memory.
10066 * @param u128Value The value to store.
10067 */
10068IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10069{
10070 /* The lazy approach for now... */
10071 PRTUINT128U pu128Dst;
10072 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10073 if (rc == VINF_SUCCESS)
10074 {
10075 pu128Dst->au64[0] = u128Value.au64[0];
10076 pu128Dst->au64[1] = u128Value.au64[1];
10077 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10078 }
10079 return rc;
10080}
10081
10082
10083#ifdef IEM_WITH_SETJMP
10084/**
10085 * Stores a data dqword, longjmp on error.
10086 *
10087 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10088 * @param iSegReg The index of the segment register to use for
10089 * this access. The base and limits are checked.
10090 * @param GCPtrMem The address of the guest memory.
10091 * @param u128Value The value to store.
10092 */
10093IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10094{
10095 /* The lazy approach for now... */
10096 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10097 pu128Dst->au64[0] = u128Value.au64[0];
10098 pu128Dst->au64[1] = u128Value.au64[1];
10099 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10100}
10101#endif
10102
10103
10104/**
10105 * Stores a data dqword, SSE aligned.
10106 *
10107 * @returns Strict VBox status code.
10108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10109 * @param iSegReg The index of the segment register to use for
10110 * this access. The base and limits are checked.
10111 * @param GCPtrMem The address of the guest memory.
10112 * @param u128Value The value to store.
10113 */
10114IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10115{
10116 /* The lazy approach for now... */
10117 if ( (GCPtrMem & 15)
10118 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10119 return iemRaiseGeneralProtectionFault0(pVCpu);
10120
10121 PRTUINT128U pu128Dst;
10122 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10123 if (rc == VINF_SUCCESS)
10124 {
10125 pu128Dst->au64[0] = u128Value.au64[0];
10126 pu128Dst->au64[1] = u128Value.au64[1];
10127 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10128 }
10129 return rc;
10130}
10131
10132
10133#ifdef IEM_WITH_SETJMP
10134/**
10135 * Stores a data dqword, SSE aligned, longjmp on error.
10136 *
10138 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10139 * @param iSegReg The index of the segment register to use for
10140 * this access. The base and limits are checked.
10141 * @param GCPtrMem The address of the guest memory.
10142 * @param u128Value The value to store.
10143 */
10144DECL_NO_INLINE(IEM_STATIC, void)
10145iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10146{
10147 /* The lazy approach for now... */
10148 if ( (GCPtrMem & 15) == 0
10149 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10150 {
10151 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10152 pu128Dst->au64[0] = u128Value.au64[0];
10153 pu128Dst->au64[1] = u128Value.au64[1];
10154 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10155 return;
10156 }
10157
10158 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10159 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10160}
10161#endif
10162
10163
10164/**
10165 * Stores a data oword (octo word).
10166 *
10167 * @returns Strict VBox status code.
10168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10169 * @param iSegReg The index of the segment register to use for
10170 * this access. The base and limits are checked.
10171 * @param GCPtrMem The address of the guest memory.
10172 * @param pu256Value Pointer to the value to store.
10173 */
10174IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10175{
10176 /* The lazy approach for now... */
10177 PRTUINT256U pu256Dst;
10178 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10179 if (rc == VINF_SUCCESS)
10180 {
10181 pu256Dst->au64[0] = pu256Value->au64[0];
10182 pu256Dst->au64[1] = pu256Value->au64[1];
10183 pu256Dst->au64[2] = pu256Value->au64[2];
10184 pu256Dst->au64[3] = pu256Value->au64[3];
10185 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10186 }
10187 return rc;
10188}
10189
10190
10191#ifdef IEM_WITH_SETJMP
10192/**
10193 * Stores a data oword (octo word), longjmp on error.
10194 *
10195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10196 * @param iSegReg The index of the segment register to use for
10197 * this access. The base and limits are checked.
10198 * @param GCPtrMem The address of the guest memory.
10199 * @param pu256Value Pointer to the value to store.
10200 */
10201IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10202{
10203 /* The lazy approach for now... */
10204 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10205 pu256Dst->au64[0] = pu256Value->au64[0];
10206 pu256Dst->au64[1] = pu256Value->au64[1];
10207 pu256Dst->au64[2] = pu256Value->au64[2];
10208 pu256Dst->au64[3] = pu256Value->au64[3];
10209 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10210}
10211#endif
10212
10213
10214/**
10215 * Stores a data oword (octo word), AVX aligned.
10216 *
10217 * @returns Strict VBox status code.
10218 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10219 * @param iSegReg The index of the segment register to use for
10220 * this access. The base and limits are checked.
10221 * @param GCPtrMem The address of the guest memory.
10222 * @param pu256Value Pointer to the value to store.
10223 */
10224IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10225{
10226 /* The lazy approach for now... */
10227 if (GCPtrMem & 31)
10228 return iemRaiseGeneralProtectionFault0(pVCpu);
10229
10230 PRTUINT256U pu256Dst;
10231 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10232 if (rc == VINF_SUCCESS)
10233 {
10234 pu256Dst->au64[0] = pu256Value->au64[0];
10235 pu256Dst->au64[1] = pu256Value->au64[1];
10236 pu256Dst->au64[2] = pu256Value->au64[2];
10237 pu256Dst->au64[3] = pu256Value->au64[3];
10238 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10239 }
10240 return rc;
10241}
10242
10243
10244#ifdef IEM_WITH_SETJMP
10245/**
10246 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10247 *
10249 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10250 * @param iSegReg The index of the segment register to use for
10251 * this access. The base and limits are checked.
10252 * @param GCPtrMem The address of the guest memory.
10253 * @param pu256Value Pointer to the value to store.
10254 */
10255DECL_NO_INLINE(IEM_STATIC, void)
10256iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10257{
10258 /* The lazy approach for now... */
10259 if ((GCPtrMem & 31) == 0)
10260 {
10261 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10262 pu256Dst->au64[0] = pu256Value->au64[0];
10263 pu256Dst->au64[1] = pu256Value->au64[1];
10264 pu256Dst->au64[2] = pu256Value->au64[2];
10265 pu256Dst->au64[3] = pu256Value->au64[3];
10266 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10267 return;
10268 }
10269
10270 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10271 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10272}
10273#endif
10274
10275
10276/**
10277 * Stores a descriptor register (sgdt, sidt).
10278 *
10279 * @returns Strict VBox status code.
10280 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10281 * @param cbLimit The limit.
10282 * @param GCPtrBase The base address.
10283 * @param iSegReg The index of the segment register to use for
10284 * this access. The base and limits are checked.
10285 * @param GCPtrMem The address of the guest memory.
10286 */
10287IEM_STATIC VBOXSTRICTRC
10288iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10289{
10290 VBOXSTRICTRC rcStrict;
10291 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_READS))
10292 {
10293 Log(("sidt/sgdt: Guest intercept -> #VMEXIT\n"));
10294 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
10295 }
10296
10297 /*
10298 * The SIDT and SGDT instructions actually store the data using two
10299 * independent writes. The instructions do not respond to operand size prefixes.
10300 */
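 /*
  * Editorial sketch (not from the original source): the stored image is a
  * 16-bit limit at GCPtrMem followed by the base at GCPtrMem + 2 - a dword
  * base in 16/32-bit code (top byte forced to 0xff on 286-class CPUs) and a
  * qword base in 64-bit code.
  */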
10301 rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10302 if (rcStrict == VINF_SUCCESS)
10303 {
10304 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10305 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10306 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10307 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10308 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10309 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10310 else
10311 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10312 }
10313 return rcStrict;
10314}
10315
10316
10317/**
10318 * Pushes a word onto the stack.
10319 *
10320 * @returns Strict VBox status code.
10321 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10322 * @param u16Value The value to push.
10323 */
10324IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10325{
10326 /* Decrement the stack pointer. */
10327 uint64_t uNewRsp;
10328 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10329 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
10330
10331 /* Write the word the lazy way. */
10332 uint16_t *pu16Dst;
10333 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10334 if (rc == VINF_SUCCESS)
10335 {
10336 *pu16Dst = u16Value;
10337 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10338 }
10339
10340 /* Commit the new RSP value unless an access handler made trouble. */
10341 if (rc == VINF_SUCCESS)
10342 pCtx->rsp = uNewRsp;
10343
10344 return rc;
10345}
10346
10347
10348/**
10349 * Pushes a dword onto the stack.
10350 *
10351 * @returns Strict VBox status code.
10352 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10353 * @param u32Value The value to push.
10354 */
10355IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10356{
10357 /* Decrement the stack pointer. */
10358 uint64_t uNewRsp;
10359 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10360 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10361
10362 /* Write the dword the lazy way. */
10363 uint32_t *pu32Dst;
10364 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10365 if (rc == VINF_SUCCESS)
10366 {
10367 *pu32Dst = u32Value;
10368 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10369 }
10370
10371 /* Commit the new RSP value unless an access handler made trouble. */
10372 if (rc == VINF_SUCCESS)
10373 pCtx->rsp = uNewRsp;
10374
10375 return rc;
10376}
10377
10378
10379/**
10380 * Pushes a dword segment register value onto the stack.
10381 *
10382 * @returns Strict VBox status code.
10383 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10384 * @param u32Value The value to push.
10385 */
10386IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10387{
10388 /* Decrement the stack pointer. */
10389 uint64_t uNewRsp;
10390 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10391 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10392
10393 VBOXSTRICTRC rc;
10394 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
10395 {
10396 /* The recompiler writes a full dword. */
10397 uint32_t *pu32Dst;
10398 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10399 if (rc == VINF_SUCCESS)
10400 {
10401 *pu32Dst = u32Value;
10402 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10403 }
10404 }
10405 else
10406 {
10407 /* The Intel docs talk about zero extending the selector register
10408 value. The actual Intel CPU tested here might be zero extending the
10409 value, but it still only writes the lower word... */
10410 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10411 * happens when crossing a page boundary: is the high word checked
10412 * for write accessibility or not? Probably it is. What about segment limits?
10413 * It appears this behavior is also shared with trap error codes.
10414 *
10415 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
10416 * ancient hardware when it actually did change. */
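 /* Editorial note: the mapping below still reserves a full dword on the stack
    (matching the ESP adjustment above), but only the low word is written,
    mirroring the hardware behaviour described in the comments above. */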
10417 uint16_t *pu16Dst;
10418 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10419 if (rc == VINF_SUCCESS)
10420 {
10421 *pu16Dst = (uint16_t)u32Value;
10422 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10423 }
10424 }
10425
10426 /* Commit the new RSP value unless an access handler made trouble. */
10427 if (rc == VINF_SUCCESS)
10428 pCtx->rsp = uNewRsp;
10429
10430 return rc;
10431}
10432
10433
10434/**
10435 * Pushes a qword onto the stack.
10436 *
10437 * @returns Strict VBox status code.
10438 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10439 * @param u64Value The value to push.
10440 */
10441IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10442{
10443 /* Decrement the stack pointer. */
10444 uint64_t uNewRsp;
10445 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10446 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
10447
10448 /* Write the qword the lazy way. */
10449 uint64_t *pu64Dst;
10450 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10451 if (rc == VINF_SUCCESS)
10452 {
10453 *pu64Dst = u64Value;
10454 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10455 }
10456
10457 /* Commit the new RSP value unless an access handler made trouble. */
10458 if (rc == VINF_SUCCESS)
10459 pCtx->rsp = uNewRsp;
10460
10461 return rc;
10462}
10463
10464
10465/**
10466 * Pops a word from the stack.
10467 *
10468 * @returns Strict VBox status code.
10469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10470 * @param pu16Value Where to store the popped value.
10471 */
10472IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10473{
10474 /* Increment the stack pointer. */
10475 uint64_t uNewRsp;
10476 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10477 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
10478
10479 /* Read the word the lazy way. */
10480 uint16_t const *pu16Src;
10481 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10482 if (rc == VINF_SUCCESS)
10483 {
10484 *pu16Value = *pu16Src;
10485 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10486
10487 /* Commit the new RSP value. */
10488 if (rc == VINF_SUCCESS)
10489 pCtx->rsp = uNewRsp;
10490 }
10491
10492 return rc;
10493}
10494
10495
10496/**
10497 * Pops a dword from the stack.
10498 *
10499 * @returns Strict VBox status code.
10500 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10501 * @param pu32Value Where to store the popped value.
10502 */
10503IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10504{
10505 /* Increment the stack pointer. */
10506 uint64_t uNewRsp;
10507 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10508 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
10509
10510 /* Read the dword the lazy way. */
10511 uint32_t const *pu32Src;
10512 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10513 if (rc == VINF_SUCCESS)
10514 {
10515 *pu32Value = *pu32Src;
10516 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10517
10518 /* Commit the new RSP value. */
10519 if (rc == VINF_SUCCESS)
10520 pCtx->rsp = uNewRsp;
10521 }
10522
10523 return rc;
10524}
10525
10526
10527/**
10528 * Pops a qword from the stack.
10529 *
10530 * @returns Strict VBox status code.
10531 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10532 * @param pu64Value Where to store the popped value.
10533 */
10534IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10535{
10536 /* Increment the stack pointer. */
10537 uint64_t uNewRsp;
10538 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10539 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
10540
10541 /* Read the qword the lazy way. */
10542 uint64_t const *pu64Src;
10543 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10544 if (rc == VINF_SUCCESS)
10545 {
10546 *pu64Value = *pu64Src;
10547 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10548
10549 /* Commit the new RSP value. */
10550 if (rc == VINF_SUCCESS)
10551 pCtx->rsp = uNewRsp;
10552 }
10553
10554 return rc;
10555}
10556
10557
10558/**
10559 * Pushes a word onto the stack, using a temporary stack pointer.
10560 *
10561 * @returns Strict VBox status code.
10562 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10563 * @param u16Value The value to push.
10564 * @param pTmpRsp Pointer to the temporary stack pointer.
10565 */
10566IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10567{
10568 /* Decrement the stack pointer. */
10569 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10570 RTUINT64U NewRsp = *pTmpRsp;
10571 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
10572
10573 /* Write the word the lazy way. */
10574 uint16_t *pu16Dst;
10575 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10576 if (rc == VINF_SUCCESS)
10577 {
10578 *pu16Dst = u16Value;
10579 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10580 }
10581
10582 /* Commit the new RSP value unless an access handler made trouble. */
10583 if (rc == VINF_SUCCESS)
10584 *pTmpRsp = NewRsp;
10585
10586 return rc;
10587}
10588
10589
10590/**
10591 * Pushes a dword onto the stack, using a temporary stack pointer.
10592 *
10593 * @returns Strict VBox status code.
10594 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10595 * @param u32Value The value to push.
10596 * @param pTmpRsp Pointer to the temporary stack pointer.
10597 */
10598IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10599{
10600 /* Decrement the stack pointer. */
10601 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10602 RTUINT64U NewRsp = *pTmpRsp;
10603 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
10604
10605 /* Write the dword the lazy way. */
10606 uint32_t *pu32Dst;
10607 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10608 if (rc == VINF_SUCCESS)
10609 {
10610 *pu32Dst = u32Value;
10611 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10612 }
10613
10614 /* Commit the new RSP value unless an access handler made trouble. */
10615 if (rc == VINF_SUCCESS)
10616 *pTmpRsp = NewRsp;
10617
10618 return rc;
10619}
10620
10621
10622/**
10623 * Pushes a qword onto the stack, using a temporary stack pointer.
10624 *
10625 * @returns Strict VBox status code.
10626 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10627 * @param u64Value The value to push.
10628 * @param pTmpRsp Pointer to the temporary stack pointer.
10629 */
10630IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10631{
10632 /* Decrement the stack pointer. */
10633 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10634 RTUINT64U NewRsp = *pTmpRsp;
10635 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
10636
10637 /* Write the qword the lazy way. */
10638 uint64_t *pu64Dst;
10639 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10640 if (rc == VINF_SUCCESS)
10641 {
10642 *pu64Dst = u64Value;
10643 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10644 }
10645
10646 /* Commit the new RSP value unless an access handler made trouble. */
10647 if (rc == VINF_SUCCESS)
10648 *pTmpRsp = NewRsp;
10649
10650 return rc;
10651}
10652
10653
10654/**
10655 * Pops a word from the stack, using a temporary stack pointer.
10656 *
10657 * @returns Strict VBox status code.
10658 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10659 * @param pu16Value Where to store the popped value.
10660 * @param pTmpRsp Pointer to the temporary stack pointer.
10661 */
10662IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10663{
10664 /* Increment the stack pointer. */
10665 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10666 RTUINT64U NewRsp = *pTmpRsp;
10667 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
10668
10669 /* Read the word the lazy way. */
10670 uint16_t const *pu16Src;
10671 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10672 if (rc == VINF_SUCCESS)
10673 {
10674 *pu16Value = *pu16Src;
10675 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10676
10677 /* Commit the new RSP value. */
10678 if (rc == VINF_SUCCESS)
10679 *pTmpRsp = NewRsp;
10680 }
10681
10682 return rc;
10683}
10684
10685
10686/**
10687 * Pops a dword from the stack, using a temporary stack pointer.
10688 *
10689 * @returns Strict VBox status code.
10690 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10691 * @param pu32Value Where to store the popped value.
10692 * @param pTmpRsp Pointer to the temporary stack pointer.
10693 */
10694IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10695{
10696 /* Increment the stack pointer. */
10697 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10698 RTUINT64U NewRsp = *pTmpRsp;
10699 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
10700
10701 /* Read the dword the lazy way. */
10702 uint32_t const *pu32Src;
10703 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10704 if (rc == VINF_SUCCESS)
10705 {
10706 *pu32Value = *pu32Src;
10707 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10708
10709 /* Commit the new RSP value. */
10710 if (rc == VINF_SUCCESS)
10711 *pTmpRsp = NewRsp;
10712 }
10713
10714 return rc;
10715}
10716
10717
10718/**
10719 * Pops a qword from the stack, using a temporary stack pointer.
10720 *
10721 * @returns Strict VBox status code.
10722 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10723 * @param pu64Value Where to store the popped value.
10724 * @param pTmpRsp Pointer to the temporary stack pointer.
10725 */
10726IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10727{
10728 /* Increment the stack pointer. */
10729 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10730 RTUINT64U NewRsp = *pTmpRsp;
10731 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10732
10733 /* Read the qword the lazy way. */
10734 uint64_t const *pu64Src;
10735 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10736 if (rcStrict == VINF_SUCCESS)
10737 {
10738 *pu64Value = *pu64Src;
10739 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10740
10741 /* Commit the new RSP value. */
10742 if (rcStrict == VINF_SUCCESS)
10743 *pTmpRsp = NewRsp;
10744 }
10745
10746 return rcStrict;
10747}
10748
10749
10750/**
10751 * Begin a special stack push (used by interrupts, exceptions and such).
10752 *
10753 * This will raise \#SS or \#PF if appropriate.
10754 *
10755 * @returns Strict VBox status code.
10756 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10757 * @param cbMem The number of bytes to push onto the stack.
10758 * @param ppvMem Where to return the pointer to the stack memory.
10759 * As with the other memory functions this could be
10760 * direct access or bounce buffered access, so
10761 * don't commit register until the commit call
10762 * succeeds.
10763 * @param puNewRsp Where to return the new RSP value. This must be
10764 * passed unchanged to
10765 * iemMemStackPushCommitSpecial().
10766 */
10767IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10768{
10769 Assert(cbMem < UINT8_MAX);
10770 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10771 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10772 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10773}
10774
10775
10776/**
10777 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10778 *
10779 * This will update the rSP.
10780 *
10781 * @returns Strict VBox status code.
10782 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10783 * @param pvMem The pointer returned by
10784 * iemMemStackPushBeginSpecial().
10785 * @param uNewRsp The new RSP value returned by
10786 * iemMemStackPushBeginSpecial().
10787 */
10788IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10789{
10790 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10791 if (rcStrict == VINF_SUCCESS)
10792 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10793 return rcStrict;
10794}
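/*
 * Illustrative usage sketch (not from the original source; pu16Frame and
 * uSomeValue are hypothetical): callers are expected to pair the two helpers
 * above like this, committing RSP only after the write has been committed:
 *
 *     uint64_t  uNewRsp;
 *     uint16_t *pu16Frame;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, sizeof(*pu16Frame),
 *                                                         (void **)&pu16Frame, &uNewRsp);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         *pu16Frame = uSomeValue;
 *         rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
 *     }
 */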
10795
10796
10797/**
10798 * Begin a special stack pop (used by iret, retf and such).
10799 *
10800 * This will raise \#SS or \#PF if appropriate.
10801 *
10802 * @returns Strict VBox status code.
10803 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10804 * @param cbMem The number of bytes to pop from the stack.
10805 * @param ppvMem Where to return the pointer to the stack memory.
10806 * @param puNewRsp Where to return the new RSP value. This must be
10807 * assigned to CPUMCTX::rsp manually some time
10808 * after iemMemStackPopDoneSpecial() has been
10809 * called.
10810 */
10811IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10812{
10813 Assert(cbMem < UINT8_MAX);
10814 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10815 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10816 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10817}
10818
10819
10820/**
10821 * Continue a special stack pop (used by iret and retf).
10822 *
10823 * This will raise \#SS or \#PF if appropriate.
10824 *
10825 * @returns Strict VBox status code.
10826 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10827 * @param cbMem The number of bytes to pop from the stack.
10828 * @param ppvMem Where to return the pointer to the stack memory.
10829 * @param puNewRsp Where to return the new RSP value. This must be
10830 * assigned to CPUMCTX::rsp manually some time
10831 * after iemMemStackPopDoneSpecial() has been
10832 * called.
10833 */
10834IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10835{
10836 Assert(cbMem < UINT8_MAX);
10837 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10838 RTUINT64U NewRsp;
10839 NewRsp.u = *puNewRsp;
10840 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10841 *puNewRsp = NewRsp.u;
10842 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10843}
10844
10845
10846/**
10847 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10848 * iemMemStackPopContinueSpecial).
10849 *
10850 * The caller will manually commit the rSP.
10851 *
10852 * @returns Strict VBox status code.
10853 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10854 * @param pvMem The pointer returned by
10855 * iemMemStackPopBeginSpecial() or
10856 * iemMemStackPopContinueSpecial().
10857 */
10858IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10859{
10860 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10861}
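/*
 * Illustrative usage sketch (not from the original source; pu16Frame is
 * hypothetical): a typical special pop sequence, with the caller committing
 * RSP manually after iemMemStackPopDoneSpecial() succeeds:
 *
 *     uint64_t        uNewRsp;
 *     uint16_t const *pu16Frame;
 *     VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, sizeof(*pu16Frame),
 *                                                        (void const **)&pu16Frame, &uNewRsp);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         uint16_t const uValue = *pu16Frame;   // value read from the stack
 *         rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu16Frame);
 *         if (rcStrict == VINF_SUCCESS)
 *             IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
 *     }
 */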
10862
10863
10864/**
10865 * Fetches a system table byte.
10866 *
10867 * @returns Strict VBox status code.
10868 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10869 * @param pbDst Where to return the byte.
10870 * @param iSegReg The index of the segment register to use for
10871 * this access. The base and limits are checked.
10872 * @param GCPtrMem The address of the guest memory.
10873 */
10874IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10875{
10876 /* The lazy approach for now... */
10877 uint8_t const *pbSrc;
10878 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10879 if (rc == VINF_SUCCESS)
10880 {
10881 *pbDst = *pbSrc;
10882 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10883 }
10884 return rc;
10885}
10886
10887
10888/**
10889 * Fetches a system table word.
10890 *
10891 * @returns Strict VBox status code.
10892 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10893 * @param pu16Dst Where to return the word.
10894 * @param iSegReg The index of the segment register to use for
10895 * this access. The base and limits are checked.
10896 * @param GCPtrMem The address of the guest memory.
10897 */
10898IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10899{
10900 /* The lazy approach for now... */
10901 uint16_t const *pu16Src;
10902 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10903 if (rc == VINF_SUCCESS)
10904 {
10905 *pu16Dst = *pu16Src;
10906 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10907 }
10908 return rc;
10909}
10910
10911
10912/**
10913 * Fetches a system table dword.
10914 *
10915 * @returns Strict VBox status code.
10916 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10917 * @param pu32Dst Where to return the dword.
10918 * @param iSegReg The index of the segment register to use for
10919 * this access. The base and limits are checked.
10920 * @param GCPtrMem The address of the guest memory.
10921 */
10922IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10923{
10924 /* The lazy approach for now... */
10925 uint32_t const *pu32Src;
10926 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10927 if (rc == VINF_SUCCESS)
10928 {
10929 *pu32Dst = *pu32Src;
10930 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10931 }
10932 return rc;
10933}
10934
10935
10936/**
10937 * Fetches a system table qword.
10938 *
10939 * @returns Strict VBox status code.
10940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10941 * @param pu64Dst Where to return the qword.
10942 * @param iSegReg The index of the segment register to use for
10943 * this access. The base and limits are checked.
10944 * @param GCPtrMem The address of the guest memory.
10945 */
10946IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10947{
10948 /* The lazy approach for now... */
10949 uint64_t const *pu64Src;
10950 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10951 if (rc == VINF_SUCCESS)
10952 {
10953 *pu64Dst = *pu64Src;
10954 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10955 }
10956 return rc;
10957}
10958
10959
10960/**
10961 * Fetches a descriptor table entry with caller specified error code.
10962 *
10963 * @returns Strict VBox status code.
10964 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10965 * @param pDesc Where to return the descriptor table entry.
10966 * @param uSel The selector which table entry to fetch.
10967 * @param uXcpt The exception to raise on table lookup error.
10968 * @param uErrorCode The error code associated with the exception.
10969 */
10970IEM_STATIC VBOXSTRICTRC
10971iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10972{
10973 AssertPtr(pDesc);
10974 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10975
10976 /** @todo did the 286 require all 8 bytes to be accessible? */
10977 /*
10978 * Get the selector table base and check bounds.
10979 */
10980 RTGCPTR GCPtrBase;
10981 if (uSel & X86_SEL_LDT)
10982 {
10983 if ( !pCtx->ldtr.Attr.n.u1Present
10984 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10985 {
10986 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10987 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10988 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10989 uErrorCode, 0);
10990 }
10991
10992 Assert(pCtx->ldtr.Attr.n.u1Present);
10993 GCPtrBase = pCtx->ldtr.u64Base;
10994 }
10995 else
10996 {
10997 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10998 {
10999 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
11000 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
11001 uErrorCode, 0);
11002 }
11003 GCPtrBase = pCtx->gdtr.pGdt;
11004 }
11005
11006 /*
11007 * Read the legacy descriptor and maybe the long mode extensions if
11008 * required.
11009 */
11010 VBOXSTRICTRC rcStrict;
11011 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
11012 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
11013 else
11014 {
11015 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
11016 if (rcStrict == VINF_SUCCESS)
11017 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
11018 if (rcStrict == VINF_SUCCESS)
11019 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
11020 if (rcStrict == VINF_SUCCESS)
11021 pDesc->Legacy.au16[3] = 0;
11022 else
11023 return rcStrict;
11024 }
11025
11026 if (rcStrict == VINF_SUCCESS)
11027 {
11028 if ( !IEM_IS_LONG_MODE(pVCpu)
11029 || pDesc->Legacy.Gen.u1DescType)
11030 pDesc->Long.au64[1] = 0;
11031 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
11032 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
11033 else
11034 {
11035 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
11036 /** @todo is this the right exception? */
11037 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
11038 }
11039 }
11040 return rcStrict;
11041}
11042
11043
11044/**
11045 * Fetches a descriptor table entry.
11046 *
11047 * @returns Strict VBox status code.
11048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11049 * @param pDesc Where to return the descriptor table entry.
11050 * @param uSel The selector which table entry to fetch.
11051 * @param uXcpt The exception to raise on table lookup error.
11052 */
11053IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11054{
11055 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11056}
11057
11058
11059/**
11060 * Fakes a long mode stack selector for SS = 0.
11061 *
11062 * @param pDescSs Where to return the fake stack descriptor.
11063 * @param uDpl The DPL we want.
11064 */
11065IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11066{
11067 pDescSs->Long.au64[0] = 0;
11068 pDescSs->Long.au64[1] = 0;
11069 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11070 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11071 pDescSs->Long.Gen.u2Dpl = uDpl;
11072 pDescSs->Long.Gen.u1Present = 1;
11073 pDescSs->Long.Gen.u1Long = 1;
11074}
11075
11076
11077/**
11078 * Marks the selector descriptor as accessed (only non-system descriptors).
11079 *
11080 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11081 * will therefore skip the limit checks.
11082 *
11083 * @returns Strict VBox status code.
11084 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11085 * @param uSel The selector.
11086 */
11087IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11088{
11089 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11090
11091 /*
11092 * Get the selector table base and calculate the entry address.
11093 */
11094 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11095 ? pCtx->ldtr.u64Base
11096 : pCtx->gdtr.pGdt;
11097 GCPtr += uSel & X86_SEL_MASK;
11098
11099 /*
11100 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11101 * ugly stuff to avoid this. This also makes sure the access is atomic and
11102 * more or less removes any question about 8-bit vs 32-bit accesses.
11103 */
11104 VBOXSTRICTRC rcStrict;
11105 uint32_t volatile *pu32;
11106 if ((GCPtr & 3) == 0)
11107 {
11108 /* The normal case, map the 32 bits surrounding the accessed bit (bit 40). */
11109 GCPtr += 2 + 2;
11110 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11111 if (rcStrict != VINF_SUCCESS)
11112 return rcStrict;
11113 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11114 }
11115 else
11116 {
11117 /* The misaligned GDT/LDT case, map the whole thing. */
11118 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11119 if (rcStrict != VINF_SUCCESS)
11120 return rcStrict;
11121 switch ((uintptr_t)pu32 & 3)
11122 {
11123 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11124 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11125 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11126 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11127 }
11128 }
11129
11130 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11131}
11132
11133/** @} */
11134
11135
11136/*
11137 * Include the C/C++ implementation of the instructions.
11138 */
11139#include "IEMAllCImpl.cpp.h"
11140
11141
11142
11143/** @name "Microcode" macros.
11144 *
11145 * The idea is that we should be able to use the same code to interpret
11146 * instructions as well as to recompile them. Thus this obfuscation.
11147 *
11148 * @{
11149 */
11150#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11151#define IEM_MC_END() }
11152#define IEM_MC_PAUSE() do {} while (0)
11153#define IEM_MC_CONTINUE() do {} while (0)
11154
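/* Illustrative example (an editor's addition; the pattern is assumed to mirror
 * how the opcode decoder uses these blocks, it is not code from this file):
 * a trivial no-operand instruction body can be written as
 *
 *     IEM_MC_BEGIN(0, 0);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 *
 * which expands to a plain C block when interpreting, but the same source
 * could be turned into recompiler output by redefining the macros.
 */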
11155/** Internal macro. */
11156#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11157 do \
11158 { \
11159 VBOXSTRICTRC rcStrict2 = a_Expr; \
11160 if (rcStrict2 != VINF_SUCCESS) \
11161 return rcStrict2; \
11162 } while (0)
11163
11164
11165#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11166#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11167#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11168#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11169#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11170#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11171#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11172#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11173#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11174 do { \
11175 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11176 return iemRaiseDeviceNotAvailable(pVCpu); \
11177 } while (0)
11178#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11179 do { \
11180 if (((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11181 return iemRaiseDeviceNotAvailable(pVCpu); \
11182 } while (0)
11183#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11184 do { \
11185 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11186 return iemRaiseMathFault(pVCpu); \
11187 } while (0)
11188#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11189 do { \
11190 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11191 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11192 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11193 return iemRaiseUndefinedOpcode(pVCpu); \
11194 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11195 return iemRaiseDeviceNotAvailable(pVCpu); \
11196 } while (0)
11197#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11198 do { \
11199 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11200 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11201 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11202 return iemRaiseUndefinedOpcode(pVCpu); \
11203 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11204 return iemRaiseDeviceNotAvailable(pVCpu); \
11205 } while (0)
11206#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11207 do { \
11208 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11209 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11210 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11211 return iemRaiseUndefinedOpcode(pVCpu); \
11212 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11213 return iemRaiseDeviceNotAvailable(pVCpu); \
11214 } while (0)
11215#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11216 do { \
11217 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11218 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11219 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11220 return iemRaiseUndefinedOpcode(pVCpu); \
11221 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11222 return iemRaiseDeviceNotAvailable(pVCpu); \
11223 } while (0)
11224#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11225 do { \
11226 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11227 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11228 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11229 return iemRaiseUndefinedOpcode(pVCpu); \
11230 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11231 return iemRaiseDeviceNotAvailable(pVCpu); \
11232 } while (0)
11233#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11234 do { \
11235 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11236 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11237 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11238 return iemRaiseUndefinedOpcode(pVCpu); \
11239 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11240 return iemRaiseDeviceNotAvailable(pVCpu); \
11241 } while (0)
11242#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11243 do { \
11244 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11245 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11246 return iemRaiseUndefinedOpcode(pVCpu); \
11247 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11248 return iemRaiseDeviceNotAvailable(pVCpu); \
11249 } while (0)
11250#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11251 do { \
11252 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11253 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11254 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11255 return iemRaiseUndefinedOpcode(pVCpu); \
11256 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11257 return iemRaiseDeviceNotAvailable(pVCpu); \
11258 } while (0)
11259#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11260 do { \
11261 if (pVCpu->iem.s.uCpl != 0) \
11262 return iemRaiseGeneralProtectionFault0(pVCpu); \
11263 } while (0)
11264#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11265 do { \
11266 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11267 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11268 } while (0)
11269#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11270 do { \
11271 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11272 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11273 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_FSGSBASE)) \
11274 return iemRaiseUndefinedOpcode(pVCpu); \
11275 } while (0)
11276#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11277 do { \
11278 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11279 return iemRaiseGeneralProtectionFault0(pVCpu); \
11280 } while (0)
11281
11282
11283#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11284#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11285#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11286#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11287#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11288#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11289#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11290 uint32_t a_Name; \
11291 uint32_t *a_pName = &a_Name
11292#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11293 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
11294
11295#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11296#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11297
11298#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11299#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11300#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11301#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11302#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11303#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11304#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11305#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11306#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11307#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11308#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11309#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11310#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11311#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11312#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11313#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11314#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11315#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11316#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11317#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11318#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg));
11319#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg));
11320#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11321#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11322#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11323#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11324#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11325#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11326#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11327#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11328#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11329/** @note Not for IOPL or IF testing or modification. */
11330#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11331#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11332#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
11333#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
11334
11335#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11336#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11337#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11338#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11339#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11340#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11341#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11342#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11343#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11344#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11345#define IEM_MC_STORE_SREG_BASE_U64(a_iSeg, a_u64Value) *iemSRegBaseRefU64(pVCpu, (a_iSeg)) = (a_u64Value)
11346#define IEM_MC_STORE_SREG_BASE_U32(a_iSeg, a_u32Value) *iemSRegBaseRefU64(pVCpu, (a_iSeg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11347#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11348 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11349
11350
11351#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11352#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11353/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11354 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11355#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11356#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11357/** @note Not for IOPL or IF testing or modification. */
11358#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11359
11360#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11361#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11362#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11363 do { \
11364 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11365 *pu32Reg += (a_u32Value); \
11366 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11367 } while (0)
11368#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11369
11370#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11371#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11372#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11373 do { \
11374 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11375 *pu32Reg -= (a_u32Value); \
11376 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11377 } while (0)
11378#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11379#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11380
11381#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11382#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11383#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11384#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11385#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11386#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11387#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11388
11389#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11390#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11391#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11392#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11393
11394#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11395#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11396#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11397
11398#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11399#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11400#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11401
11402#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11403#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11404#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11405
11406#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11407#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11408#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11409
11410#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11411
11412#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11413
11414#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11415#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11416#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11417 do { \
11418 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11419 *pu32Reg &= (a_u32Value); \
11420 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11421 } while (0)
11422#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11423
11424#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11425#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11426#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11427 do { \
11428 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11429 *pu32Reg |= (a_u32Value); \
11430 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11431 } while (0)
11432#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11433
11434
11435/** @note Not for IOPL or IF modification. */
11436#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
11437/** @note Not for IOPL or IF modification. */
11438#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
11439/** @note Not for IOPL or IF modification. */
11440#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
11441
11442#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11443
11444/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11445#define IEM_MC_FPU_TO_MMX_MODE() do { \
11446 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11447 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0xff; \
11448 } while (0)
11449
11450/** Switches the FPU state from MMX mode (FTW=0xffff). */
11451#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11452 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0; \
11453 } while (0)
11454
11455#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11456 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11457#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11458 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11459#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11460 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11461 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11462 } while (0)
11463#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11464 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11465 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11466 } while (0)
11467#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11468 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11469#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11470 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11471#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11472 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11473
11474#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11475 do { (a_u128Value).au64[0] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11476 (a_u128Value).au64[1] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11477 } while (0)
11478#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11479 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11480#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11481 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11482#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11483 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11484#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11485 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11486 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11487 } while (0)
11488#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11489 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11490#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11491 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11492 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11493 } while (0)
11494#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11495 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11496#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11497 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11498 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11499 } while (0)
11500#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11501 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11502#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11503 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11504#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11505 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11506#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11507 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11508#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11509 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11510 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11511 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11512 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11513 } while (0)
11514
11515#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11516 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11517 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11518 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11519 } while (0)
11520#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11521 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11522 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11523 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11524 } while (0)
11525#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11526 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11527 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11528 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11529 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11530 } while (0)
11531#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11532 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11533 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11534 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11535 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11536 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11537 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11538 } while (0)
11539
11540#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11541#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11542 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11543 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11544 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11545 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11546 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11547 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11548 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11549 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11550 } while (0)
11551#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11552 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11553 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11554 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11555 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11556 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11557 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11558 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11559 } while (0)
11560#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11561 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11562 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11563 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11564 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11565 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11566 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11567 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11568 } while (0)
11569#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11570 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11571 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11572 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11573 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11574 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11575 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11576 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11577 } while (0)
11578
11579#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11580 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11581#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11582 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11583#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11584 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11585#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11586 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11587 uintptr_t const iYRegTmp = (a_iYReg); \
11588 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11589 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11590 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11591 } while (0)
11592
11593#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11594 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11595 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11596 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11597 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11598 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11599 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11600 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11601 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11602 } while (0)
11603#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11604 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11605 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11606 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11607 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11608 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11609 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11610 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11611 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11612 } while (0)
11613#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11614 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11615 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11616 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11617 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11618 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11619 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11620 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11621 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11622 } while (0)
11623
11624#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11625 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11626 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11627 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11628 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11629 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11630 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11631 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11632 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11633 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11634 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11635 } while (0)
11636#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11637 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11638 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11639 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11640 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11641 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11642 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11643 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11644 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11645 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11646 } while (0)
11647#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11648 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11649 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11650 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11651 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11652 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11653 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11654 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11655 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11656 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11657 } while (0)
11658#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11659 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11660 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11661 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11662 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11663 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11664 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11665 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11666 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11667 } while (0)
11668
11669#ifndef IEM_WITH_SETJMP
11670# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11671 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11672# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11673 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11674# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11675 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11676#else
11677# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11678 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11679# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11680 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11681# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11682 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11683#endif
11684
11685#ifndef IEM_WITH_SETJMP
11686# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11687 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11688# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11689 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11690# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11691 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11692#else
11693# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11694 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11695# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11696 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11697# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11698 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11699#endif
11700
11701#ifndef IEM_WITH_SETJMP
11702# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11703 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11704# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11705 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11706# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11707 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11708#else
11709# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11710 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11711# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11712 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11713# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11714 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11715#endif
11716
11717#ifdef SOME_UNUSED_FUNCTION
11718# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11719 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11720#endif
11721
11722#ifndef IEM_WITH_SETJMP
11723# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11724 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11725# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11726 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11727# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11728 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11729# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11730 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11731#else
11732# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11733 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11734# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11735 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11736# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11737 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11738# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11739 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11740#endif
11741
11742#ifndef IEM_WITH_SETJMP
11743# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11744 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11745# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11746 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11747# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11748 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11749#else
11750# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11751 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11752# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11753 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11754# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11755 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11756#endif
11757
11758#ifndef IEM_WITH_SETJMP
11759# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11760 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11761# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11762 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11763#else
11764# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11765 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11766# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11767 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11768#endif
11769
11770#ifndef IEM_WITH_SETJMP
11771# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11772 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11773# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11774 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11775#else
11776# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11777 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11778# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11779 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11780#endif
11781
11782
11783
11784#ifndef IEM_WITH_SETJMP
11785# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11786 do { \
11787 uint8_t u8Tmp; \
11788 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11789 (a_u16Dst) = u8Tmp; \
11790 } while (0)
11791# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11792 do { \
11793 uint8_t u8Tmp; \
11794 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11795 (a_u32Dst) = u8Tmp; \
11796 } while (0)
11797# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11798 do { \
11799 uint8_t u8Tmp; \
11800 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11801 (a_u64Dst) = u8Tmp; \
11802 } while (0)
11803# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11804 do { \
11805 uint16_t u16Tmp; \
11806 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11807 (a_u32Dst) = u16Tmp; \
11808 } while (0)
11809# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11810 do { \
11811 uint16_t u16Tmp; \
11812 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11813 (a_u64Dst) = u16Tmp; \
11814 } while (0)
11815# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11816 do { \
11817 uint32_t u32Tmp; \
11818 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11819 (a_u64Dst) = u32Tmp; \
11820 } while (0)
11821#else /* IEM_WITH_SETJMP */
11822# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11823 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11824# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11825 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11826# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11827 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11828# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11829 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11830# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11831 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11832# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11833 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11834#endif /* IEM_WITH_SETJMP */
11835
11836#ifndef IEM_WITH_SETJMP
11837# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11838 do { \
11839 uint8_t u8Tmp; \
11840 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11841 (a_u16Dst) = (int8_t)u8Tmp; \
11842 } while (0)
11843# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11844 do { \
11845 uint8_t u8Tmp; \
11846 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11847 (a_u32Dst) = (int8_t)u8Tmp; \
11848 } while (0)
11849# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11850 do { \
11851 uint8_t u8Tmp; \
11852 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11853 (a_u64Dst) = (int8_t)u8Tmp; \
11854 } while (0)
11855# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11856 do { \
11857 uint16_t u16Tmp; \
11858 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11859 (a_u32Dst) = (int16_t)u16Tmp; \
11860 } while (0)
11861# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11862 do { \
11863 uint16_t u16Tmp; \
11864 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11865 (a_u64Dst) = (int16_t)u16Tmp; \
11866 } while (0)
11867# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11868 do { \
11869 uint32_t u32Tmp; \
11870 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11871 (a_u64Dst) = (int32_t)u32Tmp; \
11872 } while (0)
11873#else /* IEM_WITH_SETJMP */
11874# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11875 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11876# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11877 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11878# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11879 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11880# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11881 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11882# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11883 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11884# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11885 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11886#endif /* IEM_WITH_SETJMP */
11887
11888#ifndef IEM_WITH_SETJMP
11889# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11890 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11891# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11892 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11893# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11894 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11895# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11896 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11897#else
11898# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11899 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11900# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11901 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11902# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11903 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11904# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11905 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11906#endif
11907
11908#ifndef IEM_WITH_SETJMP
11909# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11910 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11911# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11912 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11913# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11914 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11915# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11916 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11917#else
11918# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11919 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11920# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11921 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11922# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11923 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11924# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11925 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11926#endif
11927
11928#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11929#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11930#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11931#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11932#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11933#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11934#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11935 do { \
11936 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11937 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11938 } while (0)
11939
11940#ifndef IEM_WITH_SETJMP
11941# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11942 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11943# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11944 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11945#else
11946# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11947 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11948# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11949 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11950#endif
11951
11952#ifndef IEM_WITH_SETJMP
11953# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11954 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11955# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11956 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11957#else
11958# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11959 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11960# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11961 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11962#endif
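/*
 * Usage sketch (illustrative, not lifted verbatim from the opcode tables; the
 * local names are examples and bRm is assumed to have been fetched by the
 * decoder function): a register-to-memory form typically calculates the
 * effective address and then uses one of the IEM_MC_STORE_MEM_UXX macros:
 *
 *      IEM_MC_BEGIN(0, 2);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *      IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *      IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *      IEM_MC_STORE_MEM_U16(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */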
11963
11964
11965#define IEM_MC_PUSH_U16(a_u16Value) \
11966 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11967#define IEM_MC_PUSH_U32(a_u32Value) \
11968 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11969#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11970 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11971#define IEM_MC_PUSH_U64(a_u64Value) \
11972 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11973
11974#define IEM_MC_POP_U16(a_pu16Value) \
11975 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11976#define IEM_MC_POP_U32(a_pu32Value) \
11977 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11978#define IEM_MC_POP_U64(a_pu64Value) \
11979 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
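/*
 * Usage sketch (illustrative; the segment register picked here is just an
 * example): pushing a value boils down to fetching it into a local and
 * handing it to one of the IEM_MC_PUSH_UXX macros, popping is the reverse:
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_SREG_U16(u16Value, X86_SREG_FS);
 *      IEM_MC_PUSH_U16(u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */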
11980
11981/** Maps guest memory for direct or bounce buffered access.
11982 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11983 * @remarks May return.
11984 */
11985#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11986 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11987
11988/** Maps guest memory for direct or bounce buffered access.
11989 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11990 * @remarks May return.
11991 */
11992#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11993 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11994
11995/** Commits the memory and unmaps the guest memory.
11996 * @remarks May return.
11997 */
11998#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11999 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
12000
12001/** Commits the memory and unmaps the guest memory, unless the FPU status word
12002 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
12003 * would prevent the store from being made.
12004 *
12005 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
12006 * store, while \#P will not.
12007 *
12008 * @remarks May in theory return - for now.
12009 */
12010#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
12011 do { \
12012 if ( !(a_u16FSW & X86_FSW_ES) \
12013 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
12014 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
12015 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
12016 } while (0)
12017
12018/** Calculate the effective address from R/M. */
12019#ifndef IEM_WITH_SETJMP
12020# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12021 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
12022#else
12023# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12024 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
12025#endif
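/*
 * Usage sketch (illustrative; argument names are examples and bRm comes from
 * the decoder function): a read-modify-write memory operand is mapped with
 * IEM_MC_MEM_MAP, handed to an assembly worker, and then committed with
 * IEM_MC_MEM_COMMIT_AND_UNMAP:
 *
 *      IEM_MC_BEGIN(3, 2);
 *      IEM_MC_ARG(uint16_t *,      pu16Dst,         0);
 *      IEM_MC_ARG(uint16_t,        u16Src,          1);
 *      IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 2);
 *      IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *      IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *      IEM_MC_FETCH_EFLAGS(EFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 *      IEM_MC_COMMIT_EFLAGS(EFlags);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */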
12026
12027#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
12028#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
12029#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
12030#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
12031#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
12032#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
12033#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
12034
12035/**
12036 * Defers the rest of the instruction emulation to a C implementation routine
12037 * and returns, only taking the standard parameters.
12038 *
12039 * @param a_pfnCImpl The pointer to the C routine.
12040 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12041 */
12042#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12043
12044/**
12045 * Defers the rest of the instruction emulation to a C implementation routine and
12046 * returns, taking one argument in addition to the standard ones.
12047 *
12048 * @param a_pfnCImpl The pointer to the C routine.
12049 * @param a0 The argument.
12050 */
12051#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12052
12053/**
12054 * Defers the rest of the instruction emulation to a C implementation routine
12055 * and returns, taking two arguments in addition to the standard ones.
12056 *
12057 * @param a_pfnCImpl The pointer to the C routine.
12058 * @param a0 The first extra argument.
12059 * @param a1 The second extra argument.
12060 */
12061#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12062
12063/**
12064 * Defers the rest of the instruction emulation to a C implementation routine
12065 * and returns, taking three arguments in addition to the standard ones.
12066 *
12067 * @param a_pfnCImpl The pointer to the C routine.
12068 * @param a0 The first extra argument.
12069 * @param a1 The second extra argument.
12070 * @param a2 The third extra argument.
12071 */
12072#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12073
12074/**
12075 * Defers the rest of the instruction emulation to a C implementation routine
12076 * and returns, taking four arguments in addition to the standard ones.
12077 *
12078 * @param a_pfnCImpl The pointer to the C routine.
12079 * @param a0 The first extra argument.
12080 * @param a1 The second extra argument.
12081 * @param a2 The third extra argument.
12082 * @param a3 The fourth extra argument.
12083 */
12084#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12085
12086/**
12087 * Defers the rest of the instruction emulation to a C implementation routine
12088 * and returns, taking five arguments in addition to the standard ones.
12089 *
12090 * @param a_pfnCImpl The pointer to the C routine.
12091 * @param a0 The first extra argument.
12092 * @param a1 The second extra argument.
12093 * @param a2 The third extra argument.
12094 * @param a3 The fourth extra argument.
12095 * @param a4 The fifth extra argument.
12096 */
12097#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
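/*
 * Usage sketch (illustrative; iemCImpl_SomeWorker is a placeholder name, not a
 * routine in this file): the arguments are declared with IEM_MC_ARG and
 * IEM_MC_ARG_CONST and the block ends by returning the C implementation's
 * status code through one of the IEM_MC_CALL_CIMPL_N macros:
 *
 *      IEM_MC_BEGIN(2, 0);
 *      IEM_MC_ARG(uint16_t,      u16Sel,                                     0);
 *      IEM_MC_ARG_CONST(IEMMODE, enmEffOpSizeArg, pVCpu->iem.s.enmEffOpSize, 1);
 *      IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
 *      IEM_MC_CALL_CIMPL_2(iemCImpl_SomeWorker, u16Sel, enmEffOpSizeArg);
 *      IEM_MC_END();
 */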
12098
12099/**
12100 * Defers the entire instruction emulation to a C implementation routine and
12101 * returns, only taking the standard parameters.
12102 *
12103 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12104 *
12105 * @param a_pfnCImpl The pointer to the C routine.
12106 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12107 */
12108#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12109
12110/**
12111 * Defers the entire instruction emulation to a C implementation routine and
12112 * returns, taking one argument in addition to the standard ones.
12113 *
12114 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12115 *
12116 * @param a_pfnCImpl The pointer to the C routine.
12117 * @param a0 The argument.
12118 */
12119#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12120
12121/**
12122 * Defers the entire instruction emulation to a C implementation routine and
12123 * returns, taking two arguments in addition to the standard ones.
12124 *
12125 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12126 *
12127 * @param a_pfnCImpl The pointer to the C routine.
12128 * @param a0 The first extra argument.
12129 * @param a1 The second extra argument.
12130 */
12131#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12132
12133/**
12134 * Defers the entire instruction emulation to a C implementation routine and
12135 * returns, taking three arguments in addition to the standard ones.
12136 *
12137 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12138 *
12139 * @param a_pfnCImpl The pointer to the C routine.
12140 * @param a0 The first extra argument.
12141 * @param a1 The second extra argument.
12142 * @param a2 The third extra argument.
12143 */
12144#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
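/*
 * Usage sketch (illustrative; both names are placeholders): an opcode routine
 * implemented entirely in C simply forwards to it, with no IEM_MC_BEGIN or
 * IEM_MC_END block at all:
 *
 *      FNIEMOP_DEF(iemOp_example)
 *      {
 *          IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *          return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_example);
 *      }
 */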
12145
12146/**
12147 * Calls a FPU assembly implementation taking one visible argument.
12148 *
12149 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12150 * @param a0 The first extra argument.
12151 */
12152#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12153 do { \
12154 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
12155 } while (0)
12156
12157/**
12158 * Calls a FPU assembly implementation taking two visible arguments.
12159 *
12160 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12161 * @param a0 The first extra argument.
12162 * @param a1 The second extra argument.
12163 */
12164#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12165 do { \
12166 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12167 } while (0)
12168
12169/**
12170 * Calls a FPU assembly implementation taking three visible arguments.
12171 *
12172 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12173 * @param a0 The first extra argument.
12174 * @param a1 The second extra argument.
12175 * @param a2 The third extra argument.
12176 */
12177#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12178 do { \
12179 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12180 } while (0)
12181
12182#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12183 do { \
12184 (a_FpuData).FSW = (a_FSW); \
12185 (a_FpuData).r80Result = *(a_pr80Value); \
12186 } while (0)
12187
12188/** Pushes FPU result onto the stack. */
12189#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12190 iemFpuPushResult(pVCpu, &a_FpuData)
12191/** Pushes FPU result onto the stack and sets the FPUDP. */
12192#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12193 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12194
12195/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
12196#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12197 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12198
12199/** Stores FPU result in a stack register. */
12200#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12201 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12202/** Stores FPU result in a stack register and pops the stack. */
12203#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12204 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12205/** Stores FPU result in a stack register and sets the FPUDP. */
12206#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12207 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12208/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12209 * stack. */
12210#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12211 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
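/*
 * Usage sketch (illustrative; this is roughly the shape of an ST(0),ST(i)
 * arithmetic instruction, with the usual local names): the FPU state is
 * prepared, the stack registers are referenced, the assembly worker is called
 * and the packed result stored, falling back to the stack underflow helpers
 * when a register turns out to be empty (the IEM_MC_IF_* macros are defined
 * further down in this file):
 *
 *      IEM_MC_BEGIN(3, 1);
 *      IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *      IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,    FpuRes, 0);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,         1);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,         2);
 *      IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
 *      IEM_MC_MAYBE_RAISE_FPU_XCPT();
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
 *          IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0);
 *      IEM_MC_ENDIF();
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */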
12212
12213/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12214#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12215 iemFpuUpdateOpcodeAndIp(pVCpu)
12216/** Free a stack register (for FFREE and FFREEP). */
12217#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12218 iemFpuStackFree(pVCpu, a_iStReg)
12219/** Increment the FPU stack pointer. */
12220#define IEM_MC_FPU_STACK_INC_TOP() \
12221 iemFpuStackIncTop(pVCpu)
12222/** Decrement the FPU stack pointer. */
12223#define IEM_MC_FPU_STACK_DEC_TOP() \
12224 iemFpuStackDecTop(pVCpu)
12225
12226/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12227#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12228 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12229/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12230#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12231 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12232/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12233#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12234 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12235/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12236#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12237 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12238/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12239 * stack. */
12240#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12241 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12242/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12243#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12244 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12245
12246/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12247#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12248 iemFpuStackUnderflow(pVCpu, a_iStDst)
12249/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12250 * stack. */
12251#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12252 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12253/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12254 * FPUDS. */
12255#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12256 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12257/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12258 * FPUDS. Pops stack. */
12259#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12260 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12261/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12262 * stack twice. */
12263#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12264 iemFpuStackUnderflowThenPopPop(pVCpu)
12265/** Raises a FPU stack underflow exception for an instruction pushing a result
12266 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12267#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12268 iemFpuStackPushUnderflow(pVCpu)
12269/** Raises a FPU stack underflow exception for an instruction pushing a result
12270 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12271#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12272 iemFpuStackPushUnderflowTwo(pVCpu)
12273
12274/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12275 * FPUIP, FPUCS and FOP. */
12276#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12277 iemFpuStackPushOverflow(pVCpu)
12278/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12279 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12280#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12281 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12282/** Prepares for using the FPU state.
12283 * Ensures that we can use the host FPU in the current context (RC+R0).
12284 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12285#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12286/** Actualizes the guest FPU state so it can be accessed read-only fashion. */
12287#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12288/** Actualizes the guest FPU state so it can be accessed and modified. */
12289#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12290
12291/** Prepares for using the SSE state.
12292 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12293 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12294#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12295/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12296#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12297/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12298#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12299
12300/** Prepares for using the AVX state.
12301 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12302 * Ensures the guest AVX state in the CPUMCTX is up to date.
12303 * @note This will include the AVX512 state too when support for it is added
12304 * due to the zero extending feature of VEX instructions. */
12305#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12306/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12307#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12308/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12309#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12310
12311/**
12312 * Calls a MMX assembly implementation taking two visible arguments.
12313 *
12314 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12315 * @param a0 The first extra argument.
12316 * @param a1 The second extra argument.
12317 */
12318#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12319 do { \
12320 IEM_MC_PREPARE_FPU_USAGE(); \
12321 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12322 } while (0)
12323
12324/**
12325 * Calls a MMX assembly implementation taking three visible arguments.
12326 *
12327 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12328 * @param a0 The first extra argument.
12329 * @param a1 The second extra argument.
12330 * @param a2 The third extra argument.
12331 */
12332#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12333 do { \
12334 IEM_MC_PREPARE_FPU_USAGE(); \
12335 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12336 } while (0)
12337
12338
12339/**
12340 * Calls a SSE assembly implementation taking two visible arguments.
12341 *
12342 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12343 * @param a0 The first extra argument.
12344 * @param a1 The second extra argument.
12345 */
12346#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12347 do { \
12348 IEM_MC_PREPARE_SSE_USAGE(); \
12349 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12350 } while (0)
12351
12352/**
12353 * Calls a SSE assembly implementation taking three visible arguments.
12354 *
12355 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12356 * @param a0 The first extra argument.
12357 * @param a1 The second extra argument.
12358 * @param a2 The third extra argument.
12359 */
12360#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12361 do { \
12362 IEM_MC_PREPARE_SSE_USAGE(); \
12363 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12364 } while (0)
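/*
 * Usage sketch (illustrative; the worker name is just an example of the
 * iemAImpl_* naming scheme): a register-form SSE instruction references the
 * XMM registers and hands them to the worker:
 *
 *      IEM_MC_BEGIN(2, 0);
 *      IEM_MC_ARG(PRTUINT128U,  pDst, 0);
 *      IEM_MC_ARG(PCRTUINT128U, pSrc, 1);
 *      IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
 *      IEM_MC_PREPARE_SSE_USAGE();
 *      IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *      IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
 *      IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_pxor_u128, pDst, pSrc);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */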
12365
12366
12367/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12368 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12369#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12370 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState), 0)
12371
12372/**
12373 * Calls an AVX assembly implementation taking two visible arguments.
12374 *
12375 * There is one implicit zero'th argument, a pointer to the extended state.
12376 *
12377 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12378 * @param a1 The first extra argument.
12379 * @param a2 The second extra argument.
12380 */
12381#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12382 do { \
12383 IEM_MC_PREPARE_AVX_USAGE(); \
12384 a_pfnAImpl(pXState, (a1), (a2)); \
12385 } while (0)
12386
12387/**
12388 * Calls an AVX assembly implementation taking three visible arguments.
12389 *
12390 * There is one implicit zero'th argument, a pointer to the extended state.
12391 *
12392 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12393 * @param a1 The first extra argument.
12394 * @param a2 The second extra argument.
12395 * @param a3 The third extra argument.
12396 */
12397#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12398 do { \
12399 IEM_MC_PREPARE_AVX_USAGE(); \
12400 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12401 } while (0)
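/*
 * Usage sketch (illustrative and more of an assumption than the others: the
 * worker name is a placeholder and it is assumed to take YMM register indices
 * after the implicit extended-state pointer): IEM_MC_IMPLICIT_AVX_AIMPL_ARGS
 * declares the zero'th argument, so only arguments 1..n are spelled out:
 *
 *      IEM_MC_BEGIN(4, 0);
 *      IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *      IEM_MC_ARG_CONST(uint8_t, iYRegDst,  ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, 1);
 *      IEM_MC_ARG_CONST(uint8_t, iYRegSrc1, pVCpu->iem.s.uVex3rdReg,                                                     2);
 *      IEM_MC_ARG_CONST(uint8_t, iYRegSrc2, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB,                              3);
 *      IEM_MC_CALL_AVX_AIMPL_3(iemAImpl_SomeAvxWorker_u256, iYRegDst, iYRegSrc1, iYRegSrc2);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */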
12402
12403/** @note Not for IOPL or IF testing. */
12404#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
12405/** @note Not for IOPL or IF testing. */
12406#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
12407/** @note Not for IOPL or IF testing. */
12408#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
12409/** @note Not for IOPL or IF testing. */
12410#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
12411/** @note Not for IOPL or IF testing. */
12412#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12413 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12414 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12415/** @note Not for IOPL or IF testing. */
12416#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12417 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12418 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12419/** @note Not for IOPL or IF testing. */
12420#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12421 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12422 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12423 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12424/** @note Not for IOPL or IF testing. */
12425#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12426 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12427 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12428 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12429#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
12430#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
12431#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
12432/** @note Not for IOPL or IF testing. */
12433#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12434 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12435 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12436/** @note Not for IOPL or IF testing. */
12437#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12438 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12439 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12440/** @note Not for IOPL or IF testing. */
12441#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12442 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12443 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12444/** @note Not for IOPL or IF testing. */
12445#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12446 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12447 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12448/** @note Not for IOPL or IF testing. */
12449#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12450 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12451 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12452/** @note Not for IOPL or IF testing. */
12453#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12454 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12455 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12456#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12457#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12458
12459#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12460 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12461#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12462 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12463#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12464 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12465#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12466 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12467#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12468 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12469#define IEM_MC_IF_FCW_IM() \
12470 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12471
12472#define IEM_MC_ELSE() } else {
12473#define IEM_MC_ENDIF() } do {} while (0)
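/*
 * Usage sketch (illustrative; i8Imm is assumed to have been fetched from the
 * instruction stream by the decoder function): the IEM_MC_IF_* macros open a
 * plain if/else block that IEM_MC_ELSE and IEM_MC_ENDIF complete, e.g. a
 * conditional jump testing ZF:
 *
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *          IEM_MC_REL_JMP_S8(i8Imm);
 *      IEM_MC_ELSE()
 *          IEM_MC_ADVANCE_RIP();
 *      IEM_MC_ENDIF();
 *      IEM_MC_END();
 */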
12474
12475/** @} */
12476
12477
12478/** @name Opcode Debug Helpers.
12479 * @{
12480 */
12481#ifdef VBOX_WITH_STATISTICS
12482# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12483#else
12484# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12485#endif
12486
12487#ifdef DEBUG
12488# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12489 do { \
12490 IEMOP_INC_STATS(a_Stats); \
12491 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
12492 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12493 } while (0)
12494
12495# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12496 do { \
12497 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12498 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12499 (void)RT_CONCAT(OP_,a_Upper); \
12500 (void)(a_fDisHints); \
12501 (void)(a_fIemHints); \
12502 } while (0)
12503
12504# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12505 do { \
12506 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12507 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12508 (void)RT_CONCAT(OP_,a_Upper); \
12509 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12510 (void)(a_fDisHints); \
12511 (void)(a_fIemHints); \
12512 } while (0)
12513
12514# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12515 do { \
12516 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12517 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12518 (void)RT_CONCAT(OP_,a_Upper); \
12519 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12520 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12521 (void)(a_fDisHints); \
12522 (void)(a_fIemHints); \
12523 } while (0)
12524
12525# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12526 do { \
12527 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12528 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12529 (void)RT_CONCAT(OP_,a_Upper); \
12530 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12531 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12532 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12533 (void)(a_fDisHints); \
12534 (void)(a_fIemHints); \
12535 } while (0)
12536
12537# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12538 do { \
12539 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12540 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12541 (void)RT_CONCAT(OP_,a_Upper); \
12542 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12543 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12544 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12545 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12546 (void)(a_fDisHints); \
12547 (void)(a_fIemHints); \
12548 } while (0)
12549
12550#else
12551# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12552
12553# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12554 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12555# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12556 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12557# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12558 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12559# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12560 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12561# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12562 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12563
12564#endif
12565
12566#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12567 IEMOP_MNEMONIC0EX(a_Lower, \
12568 #a_Lower, \
12569 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12570#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12571 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12572 #a_Lower " " #a_Op1, \
12573 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12574#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12575 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12576 #a_Lower " " #a_Op1 "," #a_Op2, \
12577 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12578#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12579 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12580 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12581 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12582#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12583 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12584 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12585 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
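/*
 * Usage sketch (illustrative): an opcode decoder function typically opens with
 * one of these, along the lines of
 *
 *      IEMOP_MNEMONIC2(MR, ADD, add, Eb, Gb, DISOPTYPE_HARMLESS, 0);
 *
 * which bumps the per-instruction statistics counter, logs the mnemonic at
 * log level 4 in debug builds, and references the IEMOPFORM_MR, OP_ADD,
 * OP_PARM_Eb and OP_PARM_Gb constants so that typos fail to compile.
 */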
12586
12587/** @} */
12588
12589
12590/** @name Opcode Helpers.
12591 * @{
12592 */
12593
12594#ifdef IN_RING3
12595# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12596 do { \
12597 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12598 else \
12599 { \
12600 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12601 return IEMOP_RAISE_INVALID_OPCODE(); \
12602 } \
12603 } while (0)
12604#else
12605# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12606 do { \
12607 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12608 else return IEMOP_RAISE_INVALID_OPCODE(); \
12609 } while (0)
12610#endif
12611
12612/** The instruction requires a 186 or later. */
12613#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12614# define IEMOP_HLP_MIN_186() do { } while (0)
12615#else
12616# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12617#endif
12618
12619/** The instruction requires a 286 or later. */
12620#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12621# define IEMOP_HLP_MIN_286() do { } while (0)
12622#else
12623# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12624#endif
12625
12626/** The instruction requires a 386 or later. */
12627#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12628# define IEMOP_HLP_MIN_386() do { } while (0)
12629#else
12630# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12631#endif
12632
12633/** The instruction requires a 386 or later if the given expression is true. */
12634#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12635# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12636#else
12637# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12638#endif
12639
12640/** The instruction requires a 486 or later. */
12641#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12642# define IEMOP_HLP_MIN_486() do { } while (0)
12643#else
12644# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12645#endif
12646
12647/** The instruction requires a Pentium (586) or later. */
12648#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12649# define IEMOP_HLP_MIN_586() do { } while (0)
12650#else
12651# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12652#endif
12653
12654/** The instruction requires a PentiumPro (686) or later. */
12655#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12656# define IEMOP_HLP_MIN_686() do { } while (0)
12657#else
12658# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12659#endif
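/*
 * Usage sketch (illustrative): these sit right after the mnemonic macro in the
 * opcode routine, e.g.
 *
 *      IEMOP_MNEMONIC(pusha, "pusha");
 *      IEMOP_HLP_MIN_186();
 *
 * so that a target CPU below the required generation takes the invalid opcode
 * path instead of getting the newer behaviour.
 */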
12660
12661
12662/** The instruction raises an \#UD in real and V8086 mode. */
12663#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12664 do \
12665 { \
12666 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12667 else return IEMOP_RAISE_INVALID_OPCODE(); \
12668 } while (0)
12669
12670/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12671 * 64-bit mode. */
12672#define IEMOP_HLP_NO_64BIT() \
12673 do \
12674 { \
12675 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12676 return IEMOP_RAISE_INVALID_OPCODE(); \
12677 } while (0)
12678
12679/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12680 * 64-bit mode. */
12681#define IEMOP_HLP_ONLY_64BIT() \
12682 do \
12683 { \
12684 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12685 return IEMOP_RAISE_INVALID_OPCODE(); \
12686 } while (0)
12687
12688/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12689#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12690 do \
12691 { \
12692 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12693 iemRecalEffOpSize64Default(pVCpu); \
12694 } while (0)
12695
12696/** The instruction has 64-bit operand size if 64-bit mode. */
12697#define IEMOP_HLP_64BIT_OP_SIZE() \
12698 do \
12699 { \
12700 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12701 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12702 } while (0)
12703
12704/** Only a REX prefix immediately preceding the first opcode byte takes
12705 * effect. This macro helps ensure that, and it logs bad guest code. */
12706#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12707 do \
12708 { \
12709 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12710 { \
12711 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
12712 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
12713 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12714 pVCpu->iem.s.uRexB = 0; \
12715 pVCpu->iem.s.uRexIndex = 0; \
12716 pVCpu->iem.s.uRexReg = 0; \
12717 iemRecalEffOpSize(pVCpu); \
12718 } \
12719 } while (0)
12720
12721/**
12722 * Done decoding.
12723 */
12724#define IEMOP_HLP_DONE_DECODING() \
12725 do \
12726 { \
12727 /*nothing for now, maybe later... */ \
12728 } while (0)
12729
12730/**
12731 * Done decoding, raise \#UD exception if lock prefix present.
12732 */
12733#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12734 do \
12735 { \
12736 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12737 { /* likely */ } \
12738 else \
12739 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12740 } while (0)
12741
12742
12743/**
12744 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12745 * repnz or size prefixes are present, or if in real or v8086 mode.
12746 */
12747#define IEMOP_HLP_DONE_VEX_DECODING() \
12748 do \
12749 { \
12750 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12751 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12752 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12753 { /* likely */ } \
12754 else \
12755 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12756 } while (0)
12757
12758/**
12759 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12760 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not 0.
12761 */
12762#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12763 do \
12764 { \
12765 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12766 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12767 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12768 && pVCpu->iem.s.uVexLength == 0)) \
12769 { /* likely */ } \
12770 else \
12771 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12772 } while (0)
12773
12774
12775/**
12776 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12777 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12778 * register 0, or if in real or v8086 mode.
12779 */
12780#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12781 do \
12782 { \
12783 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12784 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12785 && !pVCpu->iem.s.uVex3rdReg \
12786 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12787 { /* likely */ } \
12788 else \
12789 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12790 } while (0)
12791
12792/**
12793 * Done decoding VEX, no V, L=0.
12794 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12795 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12796 */
12797#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12798 do \
12799 { \
12800 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12801 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12802 && pVCpu->iem.s.uVexLength == 0 \
12803 && pVCpu->iem.s.uVex3rdReg == 0 \
12804 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12805 { /* likely */ } \
12806 else \
12807 return IEMOP_RAISE_INVALID_OPCODE(); \
12808 } while (0)
12809
12810#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12811 do \
12812 { \
12813 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12814 { /* likely */ } \
12815 else \
12816 { \
12817 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12818 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12819 } \
12820 } while (0)
12821#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12822 do \
12823 { \
12824 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12825 { /* likely */ } \
12826 else \
12827 { \
12828 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12829 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12830 } \
12831 } while (0)
12832
12833/**
12834 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12835 * are present.
12836 */
12837#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12838 do \
12839 { \
12840 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12841 { /* likely */ } \
12842 else \
12843 return IEMOP_RAISE_INVALID_OPCODE(); \
12844 } while (0)
12845
12846
12847#ifdef VBOX_WITH_NESTED_HWVIRT
12848/** Checks and handles the SVM nested-guest control & instruction intercept. */
12849# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
12850 do \
12851 { \
12852 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
12853 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
12854 } while (0)
12855
12856/** Checks and handles the SVM nested-guest read intercept for the given control register. */
12857# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \
12858 do \
12859 { \
12860 if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)) \
12861 IEM_RETURN_SVM_VMEXIT(a_pVCpu, SVM_EXIT_READ_CR0 + (a_uCr), a_uExitInfo1, a_uExitInfo2); \
12862 } while (0)
12863
12864#else /* !VBOX_WITH_NESTED_HWVIRT */
12865# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12866# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12867#endif /* !VBOX_WITH_NESTED_HWVIRT */
12868
12869
12870/**
12871 * Calculates the effective address of a ModR/M memory operand.
12872 *
12873 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12874 *
12875 * @return Strict VBox status code.
12876 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12877 * @param bRm The ModRM byte.
12878 * @param cbImm The size of any immediate following the
12879 * effective address opcode bytes. Important for
12880 * RIP relative addressing.
12881 * @param pGCPtrEff Where to return the effective address.
12882 */
12883IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12884{
12885 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12886 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12887# define SET_SS_DEF() \
12888 do \
12889 { \
12890 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12891 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12892 } while (0)
12893
12894 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12895 {
12896/** @todo Check the effective address size crap! */
12897 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12898 {
12899 uint16_t u16EffAddr;
12900
12901 /* Handle the disp16 form with no registers first. */
12902 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12903 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12904 else
12905 {
12906 /* Get the displacement. */
12907 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12908 {
12909 case 0: u16EffAddr = 0; break;
12910 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12911 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12912 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12913 }
12914
12915 /* Add the base and index registers to the disp. */
12916 switch (bRm & X86_MODRM_RM_MASK)
12917 {
12918 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12919 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12920 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12921 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12922 case 4: u16EffAddr += pCtx->si; break;
12923 case 5: u16EffAddr += pCtx->di; break;
12924 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12925 case 7: u16EffAddr += pCtx->bx; break;
12926 }
12927 }
12928
12929 *pGCPtrEff = u16EffAddr;
12930 }
12931 else
12932 {
12933 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12934 uint32_t u32EffAddr;
12935
12936 /* Handle the disp32 form with no registers first. */
12937 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12938 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12939 else
12940 {
12941 /* Get the register (or SIB) value. */
12942 switch ((bRm & X86_MODRM_RM_MASK))
12943 {
12944 case 0: u32EffAddr = pCtx->eax; break;
12945 case 1: u32EffAddr = pCtx->ecx; break;
12946 case 2: u32EffAddr = pCtx->edx; break;
12947 case 3: u32EffAddr = pCtx->ebx; break;
12948 case 4: /* SIB */
12949 {
12950 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12951
12952 /* Get the index and scale it. */
12953 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12954 {
12955 case 0: u32EffAddr = pCtx->eax; break;
12956 case 1: u32EffAddr = pCtx->ecx; break;
12957 case 2: u32EffAddr = pCtx->edx; break;
12958 case 3: u32EffAddr = pCtx->ebx; break;
12959 case 4: u32EffAddr = 0; /*none */ break;
12960 case 5: u32EffAddr = pCtx->ebp; break;
12961 case 6: u32EffAddr = pCtx->esi; break;
12962 case 7: u32EffAddr = pCtx->edi; break;
12963 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12964 }
12965 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12966
12967 /* add base */
12968 switch (bSib & X86_SIB_BASE_MASK)
12969 {
12970 case 0: u32EffAddr += pCtx->eax; break;
12971 case 1: u32EffAddr += pCtx->ecx; break;
12972 case 2: u32EffAddr += pCtx->edx; break;
12973 case 3: u32EffAddr += pCtx->ebx; break;
12974 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12975 case 5:
12976 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12977 {
12978 u32EffAddr += pCtx->ebp;
12979 SET_SS_DEF();
12980 }
12981 else
12982 {
12983 uint32_t u32Disp;
12984 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12985 u32EffAddr += u32Disp;
12986 }
12987 break;
12988 case 6: u32EffAddr += pCtx->esi; break;
12989 case 7: u32EffAddr += pCtx->edi; break;
12990 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12991 }
12992 break;
12993 }
12994 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12995 case 6: u32EffAddr = pCtx->esi; break;
12996 case 7: u32EffAddr = pCtx->edi; break;
12997 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12998 }
12999
13000 /* Get and add the displacement. */
13001 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13002 {
13003 case 0:
13004 break;
13005 case 1:
13006 {
13007 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13008 u32EffAddr += i8Disp;
13009 break;
13010 }
13011 case 2:
13012 {
13013 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13014 u32EffAddr += u32Disp;
13015 break;
13016 }
13017 default:
13018 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13019 }
13020
13021 }
13022 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13023 *pGCPtrEff = u32EffAddr;
13024 else
13025 {
13026 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13027 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13028 }
13029 }
13030 }
13031 else
13032 {
13033 uint64_t u64EffAddr;
13034
13035 /* Handle the rip+disp32 form with no registers first. */
13036 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13037 {
13038 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13039 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13040 }
13041 else
13042 {
13043 /* Get the register (or SIB) value. */
13044 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13045 {
13046 case 0: u64EffAddr = pCtx->rax; break;
13047 case 1: u64EffAddr = pCtx->rcx; break;
13048 case 2: u64EffAddr = pCtx->rdx; break;
13049 case 3: u64EffAddr = pCtx->rbx; break;
13050 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13051 case 6: u64EffAddr = pCtx->rsi; break;
13052 case 7: u64EffAddr = pCtx->rdi; break;
13053 case 8: u64EffAddr = pCtx->r8; break;
13054 case 9: u64EffAddr = pCtx->r9; break;
13055 case 10: u64EffAddr = pCtx->r10; break;
13056 case 11: u64EffAddr = pCtx->r11; break;
13057 case 13: u64EffAddr = pCtx->r13; break;
13058 case 14: u64EffAddr = pCtx->r14; break;
13059 case 15: u64EffAddr = pCtx->r15; break;
13060 /* SIB */
13061 case 4:
13062 case 12:
13063 {
13064 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13065
13066 /* Get the index and scale it. */
13067 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13068 {
13069 case 0: u64EffAddr = pCtx->rax; break;
13070 case 1: u64EffAddr = pCtx->rcx; break;
13071 case 2: u64EffAddr = pCtx->rdx; break;
13072 case 3: u64EffAddr = pCtx->rbx; break;
13073 case 4: u64EffAddr = 0; /*none */ break;
13074 case 5: u64EffAddr = pCtx->rbp; break;
13075 case 6: u64EffAddr = pCtx->rsi; break;
13076 case 7: u64EffAddr = pCtx->rdi; break;
13077 case 8: u64EffAddr = pCtx->r8; break;
13078 case 9: u64EffAddr = pCtx->r9; break;
13079 case 10: u64EffAddr = pCtx->r10; break;
13080 case 11: u64EffAddr = pCtx->r11; break;
13081 case 12: u64EffAddr = pCtx->r12; break;
13082 case 13: u64EffAddr = pCtx->r13; break;
13083 case 14: u64EffAddr = pCtx->r14; break;
13084 case 15: u64EffAddr = pCtx->r15; break;
13085 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13086 }
13087 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13088
13089 /* add base */
13090 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13091 {
13092 case 0: u64EffAddr += pCtx->rax; break;
13093 case 1: u64EffAddr += pCtx->rcx; break;
13094 case 2: u64EffAddr += pCtx->rdx; break;
13095 case 3: u64EffAddr += pCtx->rbx; break;
13096 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13097 case 6: u64EffAddr += pCtx->rsi; break;
13098 case 7: u64EffAddr += pCtx->rdi; break;
13099 case 8: u64EffAddr += pCtx->r8; break;
13100 case 9: u64EffAddr += pCtx->r9; break;
13101 case 10: u64EffAddr += pCtx->r10; break;
13102 case 11: u64EffAddr += pCtx->r11; break;
13103 case 12: u64EffAddr += pCtx->r12; break;
13104 case 14: u64EffAddr += pCtx->r14; break;
13105 case 15: u64EffAddr += pCtx->r15; break;
13106 /* complicated encodings */
13107 case 5:
13108 case 13:
13109 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13110 {
13111 if (!pVCpu->iem.s.uRexB)
13112 {
13113 u64EffAddr += pCtx->rbp;
13114 SET_SS_DEF();
13115 }
13116 else
13117 u64EffAddr += pCtx->r13;
13118 }
13119 else
13120 {
13121 uint32_t u32Disp;
13122 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13123 u64EffAddr += (int32_t)u32Disp;
13124 }
13125 break;
13126 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13127 }
13128 break;
13129 }
13130 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13131 }
13132
13133 /* Get and add the displacement. */
13134 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13135 {
13136 case 0:
13137 break;
13138 case 1:
13139 {
13140 int8_t i8Disp;
13141 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13142 u64EffAddr += i8Disp;
13143 break;
13144 }
13145 case 2:
13146 {
13147 uint32_t u32Disp;
13148 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13149 u64EffAddr += (int32_t)u32Disp;
13150 break;
13151 }
13152 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13153 }
13154
13155 }
13156
13157 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13158 *pGCPtrEff = u64EffAddr;
13159 else
13160 {
13161 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13162 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13163 }
13164 }
13165
13166 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13167 return VINF_SUCCESS;
13168}
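/*
 * Worked example for the 16-bit addressing path above: bRm=0x46 gives mod=1
 * and r/m=6, so a sign-extended disp8 is fetched and BP is added, i.e. the
 * effective address is BP+disp8 with SS as the default segment (SET_SS_DEF).
 * bRm=0x06 (mod=0, r/m=6) is the special no-register form where the effective
 * address is simply the following disp16.
 */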
13169
13170
13171/**
13172 * Calculates the effective address of a ModR/M memory operand.
13173 *
13174 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13175 *
13176 * @return Strict VBox status code.
13177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13178 * @param bRm The ModRM byte.
13179 * @param cbImm The size of any immediate following the
13180 * effective address opcode bytes. Important for
13181 * RIP relative addressing.
13182 * @param pGCPtrEff Where to return the effective address.
13183 * @param offRsp RSP displacement.
13184 */
13185IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13186{
13187 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13188 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13189# define SET_SS_DEF() \
13190 do \
13191 { \
13192 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13193 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13194 } while (0)
13195
13196 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13197 {
13198/** @todo Check the effective address size crap! */
13199 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13200 {
13201 uint16_t u16EffAddr;
13202
13203 /* Handle the disp16 form with no registers first. */
13204 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13205 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13206 else
13207 {
13208 /* Get the displacement. */
13209 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13210 {
13211 case 0: u16EffAddr = 0; break;
13212 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13213 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13214 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13215 }
13216
13217 /* Add the base and index registers to the disp. */
13218 switch (bRm & X86_MODRM_RM_MASK)
13219 {
13220 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13221 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13222 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13223 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13224 case 4: u16EffAddr += pCtx->si; break;
13225 case 5: u16EffAddr += pCtx->di; break;
13226 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13227 case 7: u16EffAddr += pCtx->bx; break;
13228 }
13229 }
13230
13231 *pGCPtrEff = u16EffAddr;
13232 }
13233 else
13234 {
13235 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13236 uint32_t u32EffAddr;
13237
13238 /* Handle the disp32 form with no registers first. */
13239 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13240 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13241 else
13242 {
13243 /* Get the register (or SIB) value. */
13244 switch ((bRm & X86_MODRM_RM_MASK))
13245 {
13246 case 0: u32EffAddr = pCtx->eax; break;
13247 case 1: u32EffAddr = pCtx->ecx; break;
13248 case 2: u32EffAddr = pCtx->edx; break;
13249 case 3: u32EffAddr = pCtx->ebx; break;
13250 case 4: /* SIB */
13251 {
13252 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13253
13254 /* Get the index and scale it. */
13255 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13256 {
13257 case 0: u32EffAddr = pCtx->eax; break;
13258 case 1: u32EffAddr = pCtx->ecx; break;
13259 case 2: u32EffAddr = pCtx->edx; break;
13260 case 3: u32EffAddr = pCtx->ebx; break;
13261 case 4: u32EffAddr = 0; /*none */ break;
13262 case 5: u32EffAddr = pCtx->ebp; break;
13263 case 6: u32EffAddr = pCtx->esi; break;
13264 case 7: u32EffAddr = pCtx->edi; break;
13265 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13266 }
13267 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13268
13269 /* add base */
13270 switch (bSib & X86_SIB_BASE_MASK)
13271 {
13272 case 0: u32EffAddr += pCtx->eax; break;
13273 case 1: u32EffAddr += pCtx->ecx; break;
13274 case 2: u32EffAddr += pCtx->edx; break;
13275 case 3: u32EffAddr += pCtx->ebx; break;
13276 case 4:
13277 u32EffAddr += pCtx->esp + offRsp;
13278 SET_SS_DEF();
13279 break;
13280 case 5:
13281 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13282 {
13283 u32EffAddr += pCtx->ebp;
13284 SET_SS_DEF();
13285 }
13286 else
13287 {
13288 uint32_t u32Disp;
13289 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13290 u32EffAddr += u32Disp;
13291 }
13292 break;
13293 case 6: u32EffAddr += pCtx->esi; break;
13294 case 7: u32EffAddr += pCtx->edi; break;
13295 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13296 }
13297 break;
13298 }
13299 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13300 case 6: u32EffAddr = pCtx->esi; break;
13301 case 7: u32EffAddr = pCtx->edi; break;
13302 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13303 }
13304
13305 /* Get and add the displacement. */
13306 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13307 {
13308 case 0:
13309 break;
13310 case 1:
13311 {
13312 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13313 u32EffAddr += i8Disp;
13314 break;
13315 }
13316 case 2:
13317 {
13318 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13319 u32EffAddr += u32Disp;
13320 break;
13321 }
13322 default:
13323 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13324 }
13325
13326 }
13327 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13328 *pGCPtrEff = u32EffAddr;
13329 else
13330 {
13331 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13332 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13333 }
13334 }
13335 }
13336 else
13337 {
13338 uint64_t u64EffAddr;
13339
13340 /* Handle the rip+disp32 form with no registers first. */
13341 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13342 {
13343 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13344 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13345 }
13346 else
13347 {
13348 /* Get the register (or SIB) value. */
13349 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13350 {
13351 case 0: u64EffAddr = pCtx->rax; break;
13352 case 1: u64EffAddr = pCtx->rcx; break;
13353 case 2: u64EffAddr = pCtx->rdx; break;
13354 case 3: u64EffAddr = pCtx->rbx; break;
13355 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13356 case 6: u64EffAddr = pCtx->rsi; break;
13357 case 7: u64EffAddr = pCtx->rdi; break;
13358 case 8: u64EffAddr = pCtx->r8; break;
13359 case 9: u64EffAddr = pCtx->r9; break;
13360 case 10: u64EffAddr = pCtx->r10; break;
13361 case 11: u64EffAddr = pCtx->r11; break;
13362 case 13: u64EffAddr = pCtx->r13; break;
13363 case 14: u64EffAddr = pCtx->r14; break;
13364 case 15: u64EffAddr = pCtx->r15; break;
13365 /* SIB */
13366 case 4:
13367 case 12:
13368 {
13369 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13370
13371 /* Get the index and scale it. */
13372 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13373 {
13374 case 0: u64EffAddr = pCtx->rax; break;
13375 case 1: u64EffAddr = pCtx->rcx; break;
13376 case 2: u64EffAddr = pCtx->rdx; break;
13377 case 3: u64EffAddr = pCtx->rbx; break;
13378 case 4: u64EffAddr = 0; /*none */ break;
13379 case 5: u64EffAddr = pCtx->rbp; break;
13380 case 6: u64EffAddr = pCtx->rsi; break;
13381 case 7: u64EffAddr = pCtx->rdi; break;
13382 case 8: u64EffAddr = pCtx->r8; break;
13383 case 9: u64EffAddr = pCtx->r9; break;
13384 case 10: u64EffAddr = pCtx->r10; break;
13385 case 11: u64EffAddr = pCtx->r11; break;
13386 case 12: u64EffAddr = pCtx->r12; break;
13387 case 13: u64EffAddr = pCtx->r13; break;
13388 case 14: u64EffAddr = pCtx->r14; break;
13389 case 15: u64EffAddr = pCtx->r15; break;
13390 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13391 }
13392 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13393
13394 /* add base */
13395 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13396 {
13397 case 0: u64EffAddr += pCtx->rax; break;
13398 case 1: u64EffAddr += pCtx->rcx; break;
13399 case 2: u64EffAddr += pCtx->rdx; break;
13400 case 3: u64EffAddr += pCtx->rbx; break;
13401 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
13402 case 6: u64EffAddr += pCtx->rsi; break;
13403 case 7: u64EffAddr += pCtx->rdi; break;
13404 case 8: u64EffAddr += pCtx->r8; break;
13405 case 9: u64EffAddr += pCtx->r9; break;
13406 case 10: u64EffAddr += pCtx->r10; break;
13407 case 11: u64EffAddr += pCtx->r11; break;
13408 case 12: u64EffAddr += pCtx->r12; break;
13409 case 14: u64EffAddr += pCtx->r14; break;
13410 case 15: u64EffAddr += pCtx->r15; break;
13411 /* complicated encodings */
13412 case 5:
13413 case 13:
13414 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13415 {
13416 if (!pVCpu->iem.s.uRexB)
13417 {
13418 u64EffAddr += pCtx->rbp;
13419 SET_SS_DEF();
13420 }
13421 else
13422 u64EffAddr += pCtx->r13;
13423 }
13424 else
13425 {
13426 uint32_t u32Disp;
13427 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13428 u64EffAddr += (int32_t)u32Disp;
13429 }
13430 break;
13431 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13432 }
13433 break;
13434 }
13435 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13436 }
13437
13438 /* Get and add the displacement. */
13439 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13440 {
13441 case 0:
13442 break;
13443 case 1:
13444 {
13445 int8_t i8Disp;
13446 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13447 u64EffAddr += i8Disp;
13448 break;
13449 }
13450 case 2:
13451 {
13452 uint32_t u32Disp;
13453 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13454 u64EffAddr += (int32_t)u32Disp;
13455 break;
13456 }
13457 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13458 }
13459
13460 }
13461
13462 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13463 *pGCPtrEff = u64EffAddr;
13464 else
13465 {
13466 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13467 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13468 }
13469 }
13470
13471 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13472 return VINF_SUCCESS;
13473}
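/*
 * Worked example (illustration only, not part of the decoder): for the 32-bit
 * instruction "mov eax, [ebx+esi*4+0x10]" (8B 44 B3 10) the bytes decode as
 * ModRM=0x44 (mod=01, reg=eax, rm=100 -> SIB follows, disp8) and SIB=0xB3
 * (scale=2, index=esi, base=ebx), so the code above computes
 * GCPtrEff = (esi << 2) + ebx + 0x10.
 *
 * A minimal, self-contained sketch of the same SIB handling is kept below under
 * #if 0; the function name and the plain register array are made up for the
 * example, and it deliberately ignores the mod=00/base=101 disp32 special case.
 */
#if 0
static uint32_t iemDemoDecodeSib32(uint8_t bSib, uint32_t const au32Gprs[8])
{
    uint8_t const  iIndex = (bSib >> 3) & 7;  /* bits 5:3 - index register */
    uint8_t const  iBase  = bSib       & 7;   /* bits 2:0 - base register */
    uint8_t const  cShift = (bSib >> 6) & 3;  /* bits 7:6 - scale = 1 << cShift */
    uint32_t uAddr = iIndex != 4 ? au32Gprs[iIndex] << cShift : 0; /* index 100b = none */
    return uAddr + au32Gprs[iBase];
}
#endif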
13474
13475
13476#ifdef IEM_WITH_SETJMP
13477/**
13478 * Calculates the effective address of a ModR/M memory operand.
13479 *
13480 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13481 *
13482 * May longjmp on internal error.
13483 *
13484 * @return The effective address.
13485 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13486 * @param bRm The ModRM byte.
13487 * @param cbImm The size of any immediate following the
13488 * effective address opcode bytes. Important for
13489 * RIP relative addressing.
13490 */
13491IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13492{
13493 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13494 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13495# define SET_SS_DEF() \
13496 do \
13497 { \
13498 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13499 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13500 } while (0)
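 /* Note (illustration): SET_SS_DEF only applies when no segment prefix is present,
    mirroring the hardware rule that BP/EBP/RBP- and SP/ESP/RSP-based addressing
    defaults to SS while other bases default to DS; e.g. "mov eax, [ebp-4]" is an
    SS: access, whereas "mov eax, [ebx-4]" is a DS: access. */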
13501
13502 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13503 {
13504/** @todo Check the effective address size crap! */
13505 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13506 {
13507 uint16_t u16EffAddr;
13508
13509 /* Handle the disp16 form with no registers first. */
13510 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13511 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13512 else
13513 {
13514 /* Get the displacement. */
13515 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13516 {
13517 case 0: u16EffAddr = 0; break;
13518 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13519 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13520 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13521 }
13522
13523 /* Add the base and index registers to the disp. */
13524 switch (bRm & X86_MODRM_RM_MASK)
13525 {
13526 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13527 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13528 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13529 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13530 case 4: u16EffAddr += pCtx->si; break;
13531 case 5: u16EffAddr += pCtx->di; break;
13532 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13533 case 7: u16EffAddr += pCtx->bx; break;
13534 }
13535 }
13536
13537 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13538 return u16EffAddr;
13539 }
13540
13541 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13542 uint32_t u32EffAddr;
13543
13544 /* Handle the disp32 form with no registers first. */
13545 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13546 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13547 else
13548 {
13549 /* Get the register (or SIB) value. */
13550 switch ((bRm & X86_MODRM_RM_MASK))
13551 {
13552 case 0: u32EffAddr = pCtx->eax; break;
13553 case 1: u32EffAddr = pCtx->ecx; break;
13554 case 2: u32EffAddr = pCtx->edx; break;
13555 case 3: u32EffAddr = pCtx->ebx; break;
13556 case 4: /* SIB */
13557 {
13558 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13559
13560 /* Get the index and scale it. */
13561 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13562 {
13563 case 0: u32EffAddr = pCtx->eax; break;
13564 case 1: u32EffAddr = pCtx->ecx; break;
13565 case 2: u32EffAddr = pCtx->edx; break;
13566 case 3: u32EffAddr = pCtx->ebx; break;
13567 case 4: u32EffAddr = 0; /*none */ break;
13568 case 5: u32EffAddr = pCtx->ebp; break;
13569 case 6: u32EffAddr = pCtx->esi; break;
13570 case 7: u32EffAddr = pCtx->edi; break;
13571 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13572 }
13573 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13574
13575 /* add base */
13576 switch (bSib & X86_SIB_BASE_MASK)
13577 {
13578 case 0: u32EffAddr += pCtx->eax; break;
13579 case 1: u32EffAddr += pCtx->ecx; break;
13580 case 2: u32EffAddr += pCtx->edx; break;
13581 case 3: u32EffAddr += pCtx->ebx; break;
13582 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
13583 case 5:
13584 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13585 {
13586 u32EffAddr += pCtx->ebp;
13587 SET_SS_DEF();
13588 }
13589 else
13590 {
13591 uint32_t u32Disp;
13592 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13593 u32EffAddr += u32Disp;
13594 }
13595 break;
13596 case 6: u32EffAddr += pCtx->esi; break;
13597 case 7: u32EffAddr += pCtx->edi; break;
13598 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13599 }
13600 break;
13601 }
13602 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13603 case 6: u32EffAddr = pCtx->esi; break;
13604 case 7: u32EffAddr = pCtx->edi; break;
13605 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13606 }
13607
13608 /* Get and add the displacement. */
13609 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13610 {
13611 case 0:
13612 break;
13613 case 1:
13614 {
13615 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13616 u32EffAddr += i8Disp;
13617 break;
13618 }
13619 case 2:
13620 {
13621 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13622 u32EffAddr += u32Disp;
13623 break;
13624 }
13625 default:
13626 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13627 }
13628 }
13629
13630 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13631 {
13632 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13633 return u32EffAddr;
13634 }
13635 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13636 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13637 return u32EffAddr & UINT16_MAX;
13638 }
13639
13640 uint64_t u64EffAddr;
13641
13642 /* Handle the rip+disp32 form with no registers first. */
13643 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13644 {
13645 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13646 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13647 }
13648 else
13649 {
13650 /* Get the register (or SIB) value. */
13651 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13652 {
13653 case 0: u64EffAddr = pCtx->rax; break;
13654 case 1: u64EffAddr = pCtx->rcx; break;
13655 case 2: u64EffAddr = pCtx->rdx; break;
13656 case 3: u64EffAddr = pCtx->rbx; break;
13657 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13658 case 6: u64EffAddr = pCtx->rsi; break;
13659 case 7: u64EffAddr = pCtx->rdi; break;
13660 case 8: u64EffAddr = pCtx->r8; break;
13661 case 9: u64EffAddr = pCtx->r9; break;
13662 case 10: u64EffAddr = pCtx->r10; break;
13663 case 11: u64EffAddr = pCtx->r11; break;
13664 case 13: u64EffAddr = pCtx->r13; break;
13665 case 14: u64EffAddr = pCtx->r14; break;
13666 case 15: u64EffAddr = pCtx->r15; break;
13667 /* SIB */
13668 case 4:
13669 case 12:
13670 {
13671 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13672
13673 /* Get the index and scale it. */
13674 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13675 {
13676 case 0: u64EffAddr = pCtx->rax; break;
13677 case 1: u64EffAddr = pCtx->rcx; break;
13678 case 2: u64EffAddr = pCtx->rdx; break;
13679 case 3: u64EffAddr = pCtx->rbx; break;
13680 case 4: u64EffAddr = 0; /*none */ break;
13681 case 5: u64EffAddr = pCtx->rbp; break;
13682 case 6: u64EffAddr = pCtx->rsi; break;
13683 case 7: u64EffAddr = pCtx->rdi; break;
13684 case 8: u64EffAddr = pCtx->r8; break;
13685 case 9: u64EffAddr = pCtx->r9; break;
13686 case 10: u64EffAddr = pCtx->r10; break;
13687 case 11: u64EffAddr = pCtx->r11; break;
13688 case 12: u64EffAddr = pCtx->r12; break;
13689 case 13: u64EffAddr = pCtx->r13; break;
13690 case 14: u64EffAddr = pCtx->r14; break;
13691 case 15: u64EffAddr = pCtx->r15; break;
13692 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13693 }
13694 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13695
13696 /* add base */
13697 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13698 {
13699 case 0: u64EffAddr += pCtx->rax; break;
13700 case 1: u64EffAddr += pCtx->rcx; break;
13701 case 2: u64EffAddr += pCtx->rdx; break;
13702 case 3: u64EffAddr += pCtx->rbx; break;
13703 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13704 case 6: u64EffAddr += pCtx->rsi; break;
13705 case 7: u64EffAddr += pCtx->rdi; break;
13706 case 8: u64EffAddr += pCtx->r8; break;
13707 case 9: u64EffAddr += pCtx->r9; break;
13708 case 10: u64EffAddr += pCtx->r10; break;
13709 case 11: u64EffAddr += pCtx->r11; break;
13710 case 12: u64EffAddr += pCtx->r12; break;
13711 case 14: u64EffAddr += pCtx->r14; break;
13712 case 15: u64EffAddr += pCtx->r15; break;
13713 /* complicated encodings */
13714 case 5:
13715 case 13:
13716 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13717 {
13718 if (!pVCpu->iem.s.uRexB)
13719 {
13720 u64EffAddr += pCtx->rbp;
13721 SET_SS_DEF();
13722 }
13723 else
13724 u64EffAddr += pCtx->r13;
13725 }
13726 else
13727 {
13728 uint32_t u32Disp;
13729 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13730 u64EffAddr += (int32_t)u32Disp;
13731 }
13732 break;
13733 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13734 }
13735 break;
13736 }
13737 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13738 }
13739
13740 /* Get and add the displacement. */
13741 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13742 {
13743 case 0:
13744 break;
13745 case 1:
13746 {
13747 int8_t i8Disp;
13748 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13749 u64EffAddr += i8Disp;
13750 break;
13751 }
13752 case 2:
13753 {
13754 uint32_t u32Disp;
13755 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13756 u64EffAddr += (int32_t)u32Disp;
13757 break;
13758 }
13759 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13760 }
13761
13762 }
13763
13764 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13765 {
13766 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13767 return u64EffAddr;
13768 }
13769 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13770 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13771 return u64EffAddr & UINT32_MAX;
13772}
13773#endif /* IEM_WITH_SETJMP */
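/*
 * Worked example (illustration only): in 64-bit mode a ModRM byte with mod=00 and
 * rm=101 selects RIP-relative addressing. For "mov rax, [rip+0x1000]" encoded as
 * 48 8B 05 00 10 00 00 at RIP 0x401000, the helpers above add disp32 (0x1000) to
 * the RIP of the *next* instruction (0x401000 + 7 = 0x401007), yielding an
 * effective address of 0x402007; cbImm is zero here because no immediate follows
 * the displacement.
 */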
13774
13775
13776/** @} */
13777
13778
13779
13780/*
13781 * Include the instructions
13782 */
13783#include "IEMAllInstructions.cpp.h"
13784
13785
13786
13787
13788#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13789
13790/**
13791 * Sets up execution verification mode.
13792 */
13793IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
13794{
13796 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
13797
13798 /*
13799 * Always note down the address of the current instruction.
13800 */
13801 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
13802 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
13803
13804 /*
13805 * Enable verification and/or logging.
13806 */
13807 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
13808 if ( fNewNoRem
13809 && ( 0
13810#if 0 /* auto enable on first paged protected mode interrupt */
13811 || ( pOrgCtx->eflags.Bits.u1IF
13812 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
13813 && TRPMHasTrap(pVCpu)
13814 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
13815#endif
13816#if 0
13817 || ( pOrgCtx->cs.Sel == 0x10
13818 && ( pOrgCtx->rip == 0x90119e3e
13819 || pOrgCtx->rip == 0x901d9810))
13820#endif
13821#if 0 /* Auto enable DSL - FPU stuff. */
13822 || ( pOrgCtx->cs.Sel == 0x10
13823 && (// pOrgCtx->rip == 0xc02ec07f
13824 //|| pOrgCtx->rip == 0xc02ec082
13825 //|| pOrgCtx->rip == 0xc02ec0c9
13826 0
13827 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
13828#endif
13829#if 0 /* Auto enable DSL - fstp st0 stuff. */
13830 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
13831#endif
13832#if 0
13833 || pOrgCtx->rip == 0x9022bb3a
13834#endif
13835#if 0
13836 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
13837#endif
13838#if 0
13839 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
13840 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
13841#endif
13842#if 0 /* NT4SP1 - later on the blue screen, things go wrong... */
13843 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
13844 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
13845 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
13846#endif
13847#if 0 /* NT4SP1 - xadd early boot. */
13848 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
13849#endif
13850#if 0 /* NT4SP1 - wrmsr (intel MSR). */
13851 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
13852#endif
13853#if 0 /* NT4SP1 - cmpxchg (AMD). */
13854 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
13855#endif
13856#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
13857 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
13858#endif
13859#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
13860 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
13861
13862#endif
13863#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
13864 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
13865
13866#endif
13867#if 0 /* NT4SP1 - frstor [ecx] */
13868 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
13869#endif
13870#if 0 /* xxxxxx - All long mode code. */
13871 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
13872#endif
13873#if 0 /* rep movsq linux 3.7 64-bit boot. */
13874 || (pOrgCtx->rip == 0x0000000000100241)
13875#endif
13876#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
13877 || (pOrgCtx->rip == 0x000000000215e240)
13878#endif
13879#if 0 /* DOS's size-overridden iret to v8086. */
13880 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
13881#endif
13882 )
13883 )
13884 {
13885 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
13886 RTLogFlags(NULL, "enabled");
13887 fNewNoRem = false;
13888 }
13889 if (fNewNoRem != pVCpu->iem.s.fNoRem)
13890 {
13891 pVCpu->iem.s.fNoRem = fNewNoRem;
13892 if (!fNewNoRem)
13893 {
13894 LogAlways(("Enabling verification mode!\n"));
13895 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
13896 }
13897 else
13898 LogAlways(("Disabling verification mode!\n"));
13899 }
13900
13901 /*
13902 * Switch state.
13903 */
13904 if (IEM_VERIFICATION_ENABLED(pVCpu))
13905 {
13906 static CPUMCTX s_DebugCtx; /* Ugly! */
13907
13908 s_DebugCtx = *pOrgCtx;
13909 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
13910 }
13911
13912 /*
13913 * See if there is an interrupt pending in TRPM and inject it if we can.
13914 */
13915 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13916 /** @todo Maybe someday we can centralize this under CPUMCanInjectInterrupt()? */
13917#if defined(VBOX_WITH_NESTED_HWVIRT)
13918 bool fIntrEnabled = pOrgCtx->hwvirt.Gif;
13919 if (fIntrEnabled)
13920 {
13921 if (CPUMIsGuestInSvmNestedHwVirtMode(pOrgCtx))
13922 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pOrgCtx);
13923 else
13924 fIntrEnabled = pOrgCtx->eflags.Bits.u1IF;
13925 }
13926#else
13927 bool fIntrEnabled = pOrgCtx->eflags.Bits.u1IF;
13928#endif
13929 if ( fIntrEnabled
13930 && TRPMHasTrap(pVCpu)
13931 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
13932 {
13933 uint8_t u8TrapNo;
13934 TRPMEVENT enmType;
13935 RTGCUINT uErrCode;
13936 RTGCPTR uCr2;
13937 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13938 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13939 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13940 TRPMResetTrap(pVCpu);
13941 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
13942 }
13943
13944 /*
13945 * Reset the counters.
13946 */
13947 pVCpu->iem.s.cIOReads = 0;
13948 pVCpu->iem.s.cIOWrites = 0;
13949 pVCpu->iem.s.fIgnoreRaxRdx = false;
13950 pVCpu->iem.s.fOverlappingMovs = false;
13951 pVCpu->iem.s.fProblematicMemory = false;
13952 pVCpu->iem.s.fUndefinedEFlags = 0;
13953
13954 if (IEM_VERIFICATION_ENABLED(pVCpu))
13955 {
13956 /*
13957 * Free all verification records.
13958 */
13959 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
13960 pVCpu->iem.s.pIemEvtRecHead = NULL;
13961 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
13962 do
13963 {
13964 while (pEvtRec)
13965 {
13966 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
13967 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
13968 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
13969 pEvtRec = pNext;
13970 }
13971 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
13972 pVCpu->iem.s.pOtherEvtRecHead = NULL;
13973 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
13974 } while (pEvtRec);
13975 }
13976}
13977
13978
13979/**
13980 * Allocate an event record.
13981 * @returns Pointer to a record.
13982 */
13983IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
13984{
13985 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13986 return NULL;
13987
13988 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
13989 if (pEvtRec)
13990 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
13991 else
13992 {
13993 if (!pVCpu->iem.s.ppIemEvtRecNext)
13994 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
13995
13996 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
13997 if (!pEvtRec)
13998 return NULL;
13999 }
14000 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
14001 pEvtRec->pNext = NULL;
14002 return pEvtRec;
14003}
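/*
 * Note (illustration): event records are recycled through a simple LIFO free list.
 * iemVerifyAllocRecord pops pFreeEvtRec (falling back to MMR3HeapAlloc on an empty
 * list), the notification callbacks below append the filled record to one of the
 * two per-VCpu event lists, and iemExecVerificationModeSetup pushes every record
 * back onto the free list before the next instruction is verified.
 */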
14004
14005
14006/**
14007 * IOMMMIORead notification.
14008 */
14009VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
14010{
14011 PVMCPU pVCpu = VMMGetCpu(pVM);
14012 if (!pVCpu)
14013 return;
14014 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14015 if (!pEvtRec)
14016 return;
14017 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
14018 pEvtRec->u.RamRead.GCPhys = GCPhys;
14019 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
14020 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14021 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14022}
14023
14024
14025/**
14026 * IOMMMIOWrite notification.
14027 */
14028VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
14029{
14030 PVMCPU pVCpu = VMMGetCpu(pVM);
14031 if (!pVCpu)
14032 return;
14033 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14034 if (!pEvtRec)
14035 return;
14036 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
14037 pEvtRec->u.RamWrite.GCPhys = GCPhys;
14038 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
14039 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
14040 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
14041 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
14042 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
14043 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14044 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14045}
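/*
 * Note (illustration): RT_BYTE1..RT_BYTE4 pick bytes from the least significant
 * end upwards, so a 4-byte MMIO write of u32Value=0x12345678 is recorded as
 * ab[0]=0x78, ab[1]=0x56, ab[2]=0x34, ab[3]=0x12 -- the little-endian layout the
 * guest would observe in memory.
 */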
14046
14047
14048/**
14049 * IOMIOPortRead notification.
14050 */
14051VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
14052{
14053 PVMCPU pVCpu = VMMGetCpu(pVM);
14054 if (!pVCpu)
14055 return;
14056 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14057 if (!pEvtRec)
14058 return;
14059 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
14060 pEvtRec->u.IOPortRead.Port = Port;
14061 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
14062 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14063 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14064}
14065
14066/**
14067 * IOMIOPortWrite notification.
14068 */
14069VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14070{
14071 PVMCPU pVCpu = VMMGetCpu(pVM);
14072 if (!pVCpu)
14073 return;
14074 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14075 if (!pEvtRec)
14076 return;
14077 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
14078 pEvtRec->u.IOPortWrite.Port = Port;
14079 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
14080 pEvtRec->u.IOPortWrite.u32Value = u32Value;
14081 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14082 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14083}
14084
14085
14086VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
14087{
14088 PVMCPU pVCpu = VMMGetCpu(pVM);
14089 if (!pVCpu)
14090 return;
14091 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14092 if (!pEvtRec)
14093 return;
14094 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
14095 pEvtRec->u.IOPortStrRead.Port = Port;
14096 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
14097 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
14098 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14099 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14100}
14101
14102
14103VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
14104{
14105 PVMCPU pVCpu = VMMGetCpu(pVM);
14106 if (!pVCpu)
14107 return;
14108 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14109 if (!pEvtRec)
14110 return;
14111 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
14112 pEvtRec->u.IOPortStrWrite.Port = Port;
14113 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
14114 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
14115 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14116 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14117}
14118
14119
14120/**
14121 * Fakes and records an I/O port read.
14122 *
14123 * @returns VINF_SUCCESS.
14124 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14125 * @param Port The I/O port.
14126 * @param pu32Value Where to store the fake value.
14127 * @param cbValue The size of the access.
14128 */
14129IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14130{
14131 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14132 if (pEvtRec)
14133 {
14134 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
14135 pEvtRec->u.IOPortRead.Port = Port;
14136 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
14137 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
14138 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
14139 }
14140 pVCpu->iem.s.cIOReads++;
14141 *pu32Value = 0xcccccccc;
14142 return VINF_SUCCESS;
14143}
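/*
 * Note (illustration): during verification a faked port read always produces
 * 0xcccccccc, so e.g. "in al, dx" observes AL=0xCC. That deliberate junk value is
 * what the "fend off ins" check in iemVerifyWriteRecord keys on (ab[0] == 0xcc)
 * when deciding to ignore memory written by INS-style instructions.
 */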
14144
14145
14146/**
14147 * Fakes and records an I/O port write.
14148 *
14149 * @returns VINF_SUCCESS.
14150 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14151 * @param Port The I/O port.
14152 * @param u32Value The value being written.
14153 * @param cbValue The size of the access.
14154 */
14155IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14156{
14157 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14158 if (pEvtRec)
14159 {
14160 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
14161 pEvtRec->u.IOPortWrite.Port = Port;
14162 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
14163 pEvtRec->u.IOPortWrite.u32Value = u32Value;
14164 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
14165 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
14166 }
14167 pVCpu->iem.s.cIOWrites++;
14168 return VINF_SUCCESS;
14169}
14170
14171
14172/**
14173 * Used to add extra details about a stub case.
14174 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14175 */
14176IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
14177{
14178 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14179 PVM pVM = pVCpu->CTX_SUFF(pVM);
14181 char szRegs[4096];
14182 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
14183 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
14184 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
14185 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
14186 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
14187 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
14188 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
14189 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
14190 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
14191 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
14192 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
14193 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
14194 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
14195 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
14196 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
14197 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
14198 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
14199 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
14200 " efer=%016VR{efer}\n"
14201 " pat=%016VR{pat}\n"
14202 " sf_mask=%016VR{sf_mask}\n"
14203 "krnl_gs_base=%016VR{krnl_gs_base}\n"
14204 " lstar=%016VR{lstar}\n"
14205 " star=%016VR{star} cstar=%016VR{cstar}\n"
14206 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
14207 );
14208
14209 char szInstr1[256];
14210 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
14211 DBGF_DISAS_FLAGS_DEFAULT_MODE,
14212 szInstr1, sizeof(szInstr1), NULL);
14213 char szInstr2[256];
14214 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
14215 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14216 szInstr2, sizeof(szInstr2), NULL);
14217
14218 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
14219}
14220
14221
14222/**
14223 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
14224 * dump to the assertion info.
14225 *
14226 * @param pEvtRec The record to dump.
14227 */
14228IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
14229{
14230 switch (pEvtRec->enmEvent)
14231 {
14232 case IEMVERIFYEVENT_IOPORT_READ:
14233 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
14234 pEvtRec->u.IOPortRead.Port,
14235 pEvtRec->u.IOPortRead.cbValue);
14236 break;
14237 case IEMVERIFYEVENT_IOPORT_WRITE:
14238 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
14239 pEvtRec->u.IOPortWrite.Port,
14240 pEvtRec->u.IOPortWrite.cbValue,
14241 pEvtRec->u.IOPortWrite.u32Value);
14242 break;
14243 case IEMVERIFYEVENT_IOPORT_STR_READ:
14244 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
14245 pEvtRec->u.IOPortStrRead.Port,
14246 pEvtRec->u.IOPortStrRead.cbValue,
14247 pEvtRec->u.IOPortStrRead.cTransfers);
14248 break;
14249 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14250 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
14251 pEvtRec->u.IOPortStrWrite.Port,
14252 pEvtRec->u.IOPortStrWrite.cbValue,
14253 pEvtRec->u.IOPortStrWrite.cTransfers);
14254 break;
14255 case IEMVERIFYEVENT_RAM_READ:
14256 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
14257 pEvtRec->u.RamRead.GCPhys,
14258 pEvtRec->u.RamRead.cb);
14259 break;
14260 case IEMVERIFYEVENT_RAM_WRITE:
14261 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
14262 pEvtRec->u.RamWrite.GCPhys,
14263 pEvtRec->u.RamWrite.cb,
14264 (int)pEvtRec->u.RamWrite.cb,
14265 pEvtRec->u.RamWrite.ab);
14266 break;
14267 default:
14268 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
14269 break;
14270 }
14271}
14272
14273
14274/**
14275 * Raises an assertion on the specified records, showing the given message with
14276 * a record dump attached.
14277 *
14278 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14279 * @param pEvtRec1 The first record.
14280 * @param pEvtRec2 The second record.
14281 * @param pszMsg The message explaining why we're asserting.
14282 */
14283IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
14284{
14285 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14286 iemVerifyAssertAddRecordDump(pEvtRec1);
14287 iemVerifyAssertAddRecordDump(pEvtRec2);
14288 iemVerifyAssertMsg2(pVCpu);
14289 RTAssertPanic();
14290}
14291
14292
14293/**
14294 * Raises an assertion on the specified record, showing the given message with
14295 * a record dump attached.
14296 *
14297 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14298 * @param pEvtRec The record to dump.
14299 * @param pszMsg The message explaining why we're asserting.
14300 */
14301IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
14302{
14303 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14304 iemVerifyAssertAddRecordDump(pEvtRec);
14305 iemVerifyAssertMsg2(pVCpu);
14306 RTAssertPanic();
14307}
14308
14309
14310/**
14311 * Verifies a write record.
14312 *
14313 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14314 * @param pEvtRec The write record.
14315 * @param fRem Set if REM was doing the other executing. If clear
14316 * it was HM.
14317 */
14318IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
14319{
14320 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
14321 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
14322 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
14323 if ( RT_FAILURE(rc)
14324 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
14325 {
14326 /* fend off ins */
14327 if ( !pVCpu->iem.s.cIOReads
14328 || pEvtRec->u.RamWrite.ab[0] != 0xcc
14329 || ( pEvtRec->u.RamWrite.cb != 1
14330 && pEvtRec->u.RamWrite.cb != 2
14331 && pEvtRec->u.RamWrite.cb != 4) )
14332 {
14333 /* fend off ROMs and MMIO */
14334 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
14335 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
14336 {
14337 /* fend off fxsave */
14338 if (pEvtRec->u.RamWrite.cb != 512)
14339 {
14340 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
14341 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14342 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
14343 RTAssertMsg2Add("%s: %.*Rhxs\n"
14344 "iem: %.*Rhxs\n",
14345 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
14346 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
14347 iemVerifyAssertAddRecordDump(pEvtRec);
14348 iemVerifyAssertMsg2(pVCpu);
14349 RTAssertPanic();
14350 }
14351 }
14352 }
14353 }
14354
14355}
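/*
 * Note (illustration): the ROM/MMIO fend-off above uses the unsigned range idiom
 * "x - lo > cb", which is false exactly when x lies within [lo, lo + cb]. For
 * example a write to the VGA text buffer at 0xB8000 gives 0xB8000 - 0xA0000 =
 * 0x18000 <= 0x60000, so it is not compared, while a write to plain RAM at
 * 0x200000 passes both range checks and is verified against guest memory.
 */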
14356
14357/**
14358 * Performs the post-execution verification checks.
14359 */
14360IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
14361{
14362 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14363 return rcStrictIem;
14364
14365 /*
14366 * Switch back the state.
14367 */
14368 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
14369 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
14370 Assert(pOrgCtx != pDebugCtx);
14371 IEM_GET_CTX(pVCpu) = pOrgCtx;
14372
14373 /*
14374 * Execute the instruction in REM.
14375 */
14376 bool fRem = false;
14377 PVM pVM = pVCpu->CTX_SUFF(pVM);
14379 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
14380#ifdef IEM_VERIFICATION_MODE_FULL_HM
14381 if ( HMIsEnabled(pVM)
14382 && pVCpu->iem.s.cIOReads == 0
14383 && pVCpu->iem.s.cIOWrites == 0
14384 && !pVCpu->iem.s.fProblematicMemory)
14385 {
14386 uint64_t uStartRip = pOrgCtx->rip;
14387 unsigned iLoops = 0;
14388 do
14389 {
14390 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
14391 iLoops++;
14392 } while ( rc == VINF_SUCCESS
14393 || ( rc == VINF_EM_DBG_STEPPED
14394 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14395 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
14396 || ( pOrgCtx->rip != pDebugCtx->rip
14397 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
14398 && iLoops < 8) );
14399 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
14400 rc = VINF_SUCCESS;
14401 }
14402#endif
14403 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
14404 || rc == VINF_IOM_R3_IOPORT_READ
14405 || rc == VINF_IOM_R3_IOPORT_WRITE
14406 || rc == VINF_IOM_R3_MMIO_READ
14407 || rc == VINF_IOM_R3_MMIO_READ_WRITE
14408 || rc == VINF_IOM_R3_MMIO_WRITE
14409 || rc == VINF_CPUM_R3_MSR_READ
14410 || rc == VINF_CPUM_R3_MSR_WRITE
14411 || rc == VINF_EM_RESCHEDULE
14412 )
14413 {
14414 EMRemLock(pVM);
14415 rc = REMR3EmulateInstruction(pVM, pVCpu);
14416 AssertRC(rc);
14417 EMRemUnlock(pVM);
14418 fRem = true;
14419 }
14420
14421# if 1 /* Skip unimplemented instructions for now. */
14422 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14423 {
14424 IEM_GET_CTX(pVCpu) = pOrgCtx;
14425 if (rc == VINF_EM_DBG_STEPPED)
14426 return VINF_SUCCESS;
14427 return rc;
14428 }
14429# endif
14430
14431 /*
14432 * Compare the register states.
14433 */
14434 unsigned cDiffs = 0;
14435 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
14436 {
14437 //Log(("REM and IEM ends up with different registers!\n"));
14438 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
14439
14440# define CHECK_FIELD(a_Field) \
14441 do \
14442 { \
14443 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14444 { \
14445 switch (sizeof(pOrgCtx->a_Field)) \
14446 { \
14447 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14448 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14449 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14450 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14451 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14452 } \
14453 cDiffs++; \
14454 } \
14455 } while (0)
14456# define CHECK_XSTATE_FIELD(a_Field) \
14457 do \
14458 { \
14459 if (pOrgXState->a_Field != pDebugXState->a_Field) \
14460 { \
14461 switch (sizeof(pOrgXState->a_Field)) \
14462 { \
14463 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14464 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14465 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14466 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14467 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14468 } \
14469 cDiffs++; \
14470 } \
14471 } while (0)
14472
14473# define CHECK_BIT_FIELD(a_Field) \
14474 do \
14475 { \
14476 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14477 { \
14478 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
14479 cDiffs++; \
14480 } \
14481 } while (0)
14482
14483# define CHECK_SEL(a_Sel) \
14484 do \
14485 { \
14486 CHECK_FIELD(a_Sel.Sel); \
14487 CHECK_FIELD(a_Sel.Attr.u); \
14488 CHECK_FIELD(a_Sel.u64Base); \
14489 CHECK_FIELD(a_Sel.u32Limit); \
14490 CHECK_FIELD(a_Sel.fFlags); \
14491 } while (0)
14492
14493 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
14494 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
14495
14496#if 1 /* The recompiler doesn't update these the intel way. */
14497 if (fRem)
14498 {
14499 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
14500 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
14501 pOrgXState->x87.CS = pDebugXState->x87.CS;
14502 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
14503 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
14504 pOrgXState->x87.DS = pDebugXState->x87.DS;
14505 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
14506 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
14507 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
14508 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
14509 }
14510#endif
14511 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
14512 {
14513 RTAssertMsg2Weak(" the FPU state differs\n");
14514 cDiffs++;
14515 CHECK_XSTATE_FIELD(x87.FCW);
14516 CHECK_XSTATE_FIELD(x87.FSW);
14517 CHECK_XSTATE_FIELD(x87.FTW);
14518 CHECK_XSTATE_FIELD(x87.FOP);
14519 CHECK_XSTATE_FIELD(x87.FPUIP);
14520 CHECK_XSTATE_FIELD(x87.CS);
14521 CHECK_XSTATE_FIELD(x87.Rsrvd1);
14522 CHECK_XSTATE_FIELD(x87.FPUDP);
14523 CHECK_XSTATE_FIELD(x87.DS);
14524 CHECK_XSTATE_FIELD(x87.Rsrvd2);
14525 CHECK_XSTATE_FIELD(x87.MXCSR);
14526 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
14527 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
14528 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
14529 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
14530 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
14531 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
14532 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
14533 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
14534 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
14535 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
14536 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
14537 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
14538 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
14539 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
14540 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
14541 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
14542 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
14543 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
14544 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
14545 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
14546 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
14547 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
14548 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
14549 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
14550 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
14551 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
14552 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
14553 }
14554 CHECK_FIELD(rip);
14555 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
14556 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
14557 {
14558 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
14559 CHECK_BIT_FIELD(rflags.Bits.u1CF);
14560 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
14561 CHECK_BIT_FIELD(rflags.Bits.u1PF);
14562 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
14563 CHECK_BIT_FIELD(rflags.Bits.u1AF);
14564 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
14565 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
14566 CHECK_BIT_FIELD(rflags.Bits.u1SF);
14567 CHECK_BIT_FIELD(rflags.Bits.u1TF);
14568 CHECK_BIT_FIELD(rflags.Bits.u1IF);
14569 CHECK_BIT_FIELD(rflags.Bits.u1DF);
14570 CHECK_BIT_FIELD(rflags.Bits.u1OF);
14571 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
14572 CHECK_BIT_FIELD(rflags.Bits.u1NT);
14573 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
14574 if (0 && !fRem) /** @todo debug the occasional clearing of RF when running against VT-x. */
14575 CHECK_BIT_FIELD(rflags.Bits.u1RF);
14576 CHECK_BIT_FIELD(rflags.Bits.u1VM);
14577 CHECK_BIT_FIELD(rflags.Bits.u1AC);
14578 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
14579 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
14580 CHECK_BIT_FIELD(rflags.Bits.u1ID);
14581 }
14582
14583 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
14584 CHECK_FIELD(rax);
14585 CHECK_FIELD(rcx);
14586 if (!pVCpu->iem.s.fIgnoreRaxRdx)
14587 CHECK_FIELD(rdx);
14588 CHECK_FIELD(rbx);
14589 CHECK_FIELD(rsp);
14590 CHECK_FIELD(rbp);
14591 CHECK_FIELD(rsi);
14592 CHECK_FIELD(rdi);
14593 CHECK_FIELD(r8);
14594 CHECK_FIELD(r9);
14595 CHECK_FIELD(r10);
14596 CHECK_FIELD(r11);
14597 CHECK_FIELD(r12);
14598 CHECK_FIELD(r13);
 CHECK_FIELD(r14);
 CHECK_FIELD(r15);
14599 CHECK_SEL(cs);
14600 CHECK_SEL(ss);
14601 CHECK_SEL(ds);
14602 CHECK_SEL(es);
14603 CHECK_SEL(fs);
14604 CHECK_SEL(gs);
14605 CHECK_FIELD(cr0);
14606
14607 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
14608 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
14609 /* Kludge #2: CR2 differs slightly on cross-page boundary faults; we report the last address of the access
14610 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
14611 if (pOrgCtx->cr2 != pDebugCtx->cr2)
14612 {
14613 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
14614 { /* ignore */ }
14615 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
14616 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
14617 && fRem)
14618 { /* ignore */ }
14619 else
14620 CHECK_FIELD(cr2);
14621 }
14622 CHECK_FIELD(cr3);
14623 CHECK_FIELD(cr4);
14624 CHECK_FIELD(dr[0]);
14625 CHECK_FIELD(dr[1]);
14626 CHECK_FIELD(dr[2]);
14627 CHECK_FIELD(dr[3]);
14628 CHECK_FIELD(dr[6]);
14629 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
14630 CHECK_FIELD(dr[7]);
14631 CHECK_FIELD(gdtr.cbGdt);
14632 CHECK_FIELD(gdtr.pGdt);
14633 CHECK_FIELD(idtr.cbIdt);
14634 CHECK_FIELD(idtr.pIdt);
14635 CHECK_SEL(ldtr);
14636 CHECK_SEL(tr);
14637 CHECK_FIELD(SysEnter.cs);
14638 CHECK_FIELD(SysEnter.eip);
14639 CHECK_FIELD(SysEnter.esp);
14640 CHECK_FIELD(msrEFER);
14641 CHECK_FIELD(msrSTAR);
14642 CHECK_FIELD(msrPAT);
14643 CHECK_FIELD(msrLSTAR);
14644 CHECK_FIELD(msrCSTAR);
14645 CHECK_FIELD(msrSFMASK);
14646 CHECK_FIELD(msrKERNELGSBASE);
14647
14648 if (cDiffs != 0)
14649 {
14650 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14651 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
14652 RTAssertPanic();
14653 static bool volatile s_fEnterDebugger = true;
14654 if (s_fEnterDebugger)
14655 DBGFSTOP(pVM);
14656
14657# if 1 /* Ignore unimplemented instructions for now. */
14658 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14659 rcStrictIem = VINF_SUCCESS;
14660# endif
14661 }
14662# undef CHECK_FIELD
14663# undef CHECK_BIT_FIELD
14664 }
14665
14666 /*
14667 * If the register state compared fine, check the verification event
14668 * records.
14669 */
14670 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
14671 {
14672 /*
14673 * Compare verification event records.
14674 * - I/O port accesses should be a 1:1 match.
14675 */
14676 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
14677 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
14678 while (pIemRec && pOtherRec)
14679 {
14680 /* Since we might miss RAM writes and reads, ignore reads and verify
14681 any extra IEM write records against guest memory before skipping them. */
14682 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
14683 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
14684 && pIemRec->pNext)
14685 {
14686 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14687 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14688 pIemRec = pIemRec->pNext;
14689 }
14690
14691 /* Do the compare. */
14692 if (pIemRec->enmEvent != pOtherRec->enmEvent)
14693 {
14694 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
14695 break;
14696 }
14697 bool fEquals;
14698 switch (pIemRec->enmEvent)
14699 {
14700 case IEMVERIFYEVENT_IOPORT_READ:
14701 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
14702 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
14703 break;
14704 case IEMVERIFYEVENT_IOPORT_WRITE:
14705 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
14706 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
14707 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
14708 break;
14709 case IEMVERIFYEVENT_IOPORT_STR_READ:
14710 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
14711 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
14712 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
14713 break;
14714 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14715 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
14716 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
14717 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
14718 break;
14719 case IEMVERIFYEVENT_RAM_READ:
14720 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
14721 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
14722 break;
14723 case IEMVERIFYEVENT_RAM_WRITE:
14724 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
14725 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
14726 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
14727 break;
14728 default:
14729 fEquals = false;
14730 break;
14731 }
14732 if (!fEquals)
14733 {
14734 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
14735 break;
14736 }
14737
14738 /* advance */
14739 pIemRec = pIemRec->pNext;
14740 pOtherRec = pOtherRec->pNext;
14741 }
14742
14743 /* Ignore extra writes and reads. */
14744 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
14745 {
14746 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14747 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14748 pIemRec = pIemRec->pNext;
14749 }
14750 if (pIemRec != NULL)
14751 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
14752 else if (pOtherRec != NULL)
14753 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
14754 }
14755 IEM_GET_CTX(pVCpu) = pOrgCtx;
14756
14757 return rcStrictIem;
14758}
14759
14760#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14761
14762/* stubs */
14763IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14764{
14765 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
14766 return VERR_INTERNAL_ERROR;
14767}
14768
14769IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14770{
14771 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
14772 return VERR_INTERNAL_ERROR;
14773}
14774
14775#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14776
14777
14778#ifdef LOG_ENABLED
14779/**
14780 * Logs the current instruction.
14781 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14782 * @param pCtx The current CPU context.
14783 * @param fSameCtx Set if we have the same context information as the VMM,
14784 * clear if we may have already executed an instruction in
14785 * our debug context. When clear, we assume IEMCPU holds
14786 * valid CPU mode info.
14787 */
14788IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
14789{
14790# ifdef IN_RING3
14791 if (LogIs2Enabled())
14792 {
14793 char szInstr[256];
14794 uint32_t cbInstr = 0;
14795 if (fSameCtx)
14796 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
14797 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14798 szInstr, sizeof(szInstr), &cbInstr);
14799 else
14800 {
14801 uint32_t fFlags = 0;
14802 switch (pVCpu->iem.s.enmCpuMode)
14803 {
14804 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
14805 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
14806 case IEMMODE_16BIT:
14807 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
14808 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
14809 else
14810 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
14811 break;
14812 }
14813 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
14814 szInstr, sizeof(szInstr), &cbInstr);
14815 }
14816
14817 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
14818 Log2(("****\n"
14819 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
14820 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
14821 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
14822 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
14823 " %s\n"
14824 ,
14825 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
14826 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
14827 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
14828 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
14829 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
14830 szInstr));
14831
14832 if (LogIs3Enabled())
14833 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14834 }
14835 else
14836# endif
14837 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
14838 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
14839 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
14840}
14841#endif
14842
14843
14844/**
14845 * Makes status code adjustments (pass-up from I/O and access handlers)
14846 * as well as maintaining statistics.
14847 *
14848 * @returns Strict VBox status code to pass up.
14849 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14850 * @param rcStrict The status from executing an instruction.
14851 */
14852DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14853{
14854 if (rcStrict != VINF_SUCCESS)
14855 {
14856 if (RT_SUCCESS(rcStrict))
14857 {
14858 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
14859 || rcStrict == VINF_IOM_R3_IOPORT_READ
14860 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
14861 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
14862 || rcStrict == VINF_IOM_R3_MMIO_READ
14863 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
14864 || rcStrict == VINF_IOM_R3_MMIO_WRITE
14865 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
14866 || rcStrict == VINF_CPUM_R3_MSR_READ
14867 || rcStrict == VINF_CPUM_R3_MSR_WRITE
14868 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
14869 || rcStrict == VINF_EM_RAW_TO_R3
14870 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
14871 || rcStrict == VINF_EM_TRIPLE_FAULT
14872 /* raw-mode / virt handlers only: */
14873 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
14874 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
14875 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
14876 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
14877 || rcStrict == VINF_SELM_SYNC_GDT
14878 || rcStrict == VINF_CSAM_PENDING_ACTION
14879 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
14880 /* nested hw.virt codes: */
14881 || rcStrict == VINF_SVM_VMEXIT
14882 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
14883/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
14884 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
14885#ifdef VBOX_WITH_NESTED_HWVIRT
14886 if ( rcStrict == VINF_SVM_VMEXIT
14887 && rcPassUp == VINF_SUCCESS)
14888 rcStrict = VINF_SUCCESS;
14889 else
14890#endif
14891 if (rcPassUp == VINF_SUCCESS)
14892 pVCpu->iem.s.cRetInfStatuses++;
14893 else if ( rcPassUp < VINF_EM_FIRST
14894 || rcPassUp > VINF_EM_LAST
14895 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
14896 {
14897 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14898 pVCpu->iem.s.cRetPassUpStatus++;
14899 rcStrict = rcPassUp;
14900 }
14901 else
14902 {
14903 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14904 pVCpu->iem.s.cRetInfStatuses++;
14905 }
14906 }
14907 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
14908 pVCpu->iem.s.cRetAspectNotImplemented++;
14909 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14910 pVCpu->iem.s.cRetInstrNotImplemented++;
14911#ifdef IEM_VERIFICATION_MODE_FULL
14912 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
14913 rcStrict = VINF_SUCCESS;
14914#endif
14915 else
14916 pVCpu->iem.s.cRetErrStatuses++;
14917 }
14918 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
14919 {
14920 pVCpu->iem.s.cRetPassUpStatus++;
14921 rcStrict = pVCpu->iem.s.rcPassUp;
14922 }
14923
14924 return rcStrict;
14925}
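/*
 * Note (illustration): the pass-up handling above prefers a pending
 * (non-VINF_SUCCESS) pVCpu->iem.s.rcPassUp whenever it lies outside the
 * VINF_EM_FIRST..VINF_EM_LAST range or is numerically smaller (i.e. higher
 * priority by VBox convention) than the informational status returned by the
 * instruction; otherwise the instruction status is kept and only a statistics
 * counter is bumped.
 */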
14926
14927
14928/**
14929 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
14930 * IEMExecOneWithPrefetchedByPC.
14931 *
14932 * Similar code is found in IEMExecLots.
14933 *
14934 * @return Strict VBox status code.
14935 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14937 * @param fExecuteInhibit If set, execute the instruction following STI,
14938 * POP SS and MOV SS,GR.
14939 */
14940DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
14941{
14942#ifdef IEM_WITH_SETJMP
14943 VBOXSTRICTRC rcStrict;
14944 jmp_buf JmpBuf;
14945 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14946 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14947 if ((rcStrict = setjmp(JmpBuf)) == 0)
14948 {
14949 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14950 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14951 }
14952 else
14953 pVCpu->iem.s.cLongJumps++;
14954 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14955#else
14956 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14957 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14958#endif
14959 if (rcStrict == VINF_SUCCESS)
14960 pVCpu->iem.s.cInstructions++;
14961 if (pVCpu->iem.s.cActiveMappings > 0)
14962 {
14963 Assert(rcStrict != VINF_SUCCESS);
14964 iemMemRollback(pVCpu);
14965 }
14966//#ifdef DEBUG
14967// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14968//#endif
14969
14970 /* Execute the next instruction as well if a cli, pop ss or
14971 mov ss, Gr has just completed successfully. */
14972 if ( fExecuteInhibit
14973 && rcStrict == VINF_SUCCESS
14974 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14975 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
14976 {
14977 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14978 if (rcStrict == VINF_SUCCESS)
14979 {
14980#ifdef LOG_ENABLED
14981 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
14982#endif
14983#ifdef IEM_WITH_SETJMP
14984 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14985 if ((rcStrict = setjmp(JmpBuf)) == 0)
14986 {
14987 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14988 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14989 }
14990 else
14991 pVCpu->iem.s.cLongJumps++;
14992 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14993#else
14994 IEM_OPCODE_GET_NEXT_U8(&b);
14995 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14996#endif
14997 if (rcStrict == VINF_SUCCESS)
14998 pVCpu->iem.s.cInstructions++;
14999 if (pVCpu->iem.s.cActiveMappings > 0)
15000 {
15001 Assert(rcStrict != VINF_SUCCESS);
15002 iemMemRollback(pVCpu);
15003 }
15004 }
15005 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
15006 }
15007
15008 /*
15009 * Return value fiddling, statistics and sanity assertions.
15010 */
15011 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15012
15013 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
15014 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
15015#if defined(IEM_VERIFICATION_MODE_FULL)
15016 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
15017 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
15018 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
15019 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
15020#endif
15021 return rcStrict;
15022}
15023
15024
15025#ifdef IN_RC
15026/**
15027 * Re-enters raw-mode or ensures we return to ring-3.
15028 *
15029 * @returns rcStrict, maybe modified.
15030 * @param pVCpu The cross context virtual CPU structure of the calling thread.
15031 * @param pCtx The current CPU context.
15032 * @param rcStrict The status code returned by the interpreter.
15033 */
15034DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
15035{
15036 if ( !pVCpu->iem.s.fInPatchCode
15037 && ( rcStrict == VINF_SUCCESS
15038 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
15039 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
15040 {
15041 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
15042 CPUMRawEnter(pVCpu);
15043 else
15044 {
15045 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
15046 rcStrict = VINF_EM_RESCHEDULE;
15047 }
15048 }
15049 return rcStrict;
15050}
15051#endif
15052
15053
15054/**
15055 * Execute one instruction.
15056 *
15057 * @return Strict VBox status code.
15058 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15059 */
15060VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
15061{
15062#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15063 if (++pVCpu->iem.s.cVerifyDepth == 1)
15064 iemExecVerificationModeSetup(pVCpu);
15065#endif
15066#ifdef LOG_ENABLED
15067 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15068 iemLogCurInstr(pVCpu, pCtx, true);
15069#endif
15070
15071 /*
15072 * Do the decoding and emulation.
15073 */
15074 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15075 if (rcStrict == VINF_SUCCESS)
15076 rcStrict = iemExecOneInner(pVCpu, true);
15077
15078#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15079 /*
15080 * Assert some sanity.
15081 */
15082 if (pVCpu->iem.s.cVerifyDepth == 1)
15083 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
15084 pVCpu->iem.s.cVerifyDepth--;
15085#endif
15086#ifdef IN_RC
15087 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15088#endif
15089 if (rcStrict != VINF_SUCCESS)
15090 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15091 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15092 return rcStrict;
15093}
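
/*
 * Illustrative sketch, not part of the original source: a minimal EM-style
 * caller that interprets instructions one at a time until IEM returns
 * something other than VINF_SUCCESS.  The helper name and the 16-instruction
 * cap are placeholders.
 */
#if 0 /* illustrative only */
static VBOXSTRICTRC someCallerInterpretALittle(PVMCPU pVCpu)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    for (uint32_t cInstrs = 0; cInstrs < 16 && rcStrict == VINF_SUCCESS; cInstrs++)
        rcStrict = IEMExecOne(pVCpu);   /* decodes at CS:RIP and executes one instruction */
    return rcStrict;                    /* e.g. VINF_IOM_R3_MMIO_WRITE, VINF_EM_RAW_TO_R3, ... */
}
#endif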
15094
15095
15096VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
15097{
15098 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15099 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15100
15101 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15102 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15103 if (rcStrict == VINF_SUCCESS)
15104 {
15105 rcStrict = iemExecOneInner(pVCpu, true);
15106 if (pcbWritten)
15107 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15108 }
15109
15110#ifdef IN_RC
15111 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15112#endif
15113 return rcStrict;
15114}
15115
15116
15117VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15118 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
15119{
15120 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15121 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15122
15123 VBOXSTRICTRC rcStrict;
15124 if ( cbOpcodeBytes
15125 && pCtx->rip == OpcodeBytesPC)
15126 {
15127 iemInitDecoder(pVCpu, false);
15128#ifdef IEM_WITH_CODE_TLB
15129 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15130 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15131 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15132 pVCpu->iem.s.offCurInstrStart = 0;
15133 pVCpu->iem.s.offInstrNextByte = 0;
15134#else
15135 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15136 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15137#endif
15138 rcStrict = VINF_SUCCESS;
15139 }
15140 else
15141 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15142 if (rcStrict == VINF_SUCCESS)
15143 {
15144 rcStrict = iemExecOneInner(pVCpu, true);
15145 }
15146
15147#ifdef IN_RC
15148 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15149#endif
15150 return rcStrict;
15151}
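
/*
 * Illustrative sketch, not part of the original source: handing IEM opcode
 * bytes the caller has already fetched so the decoder can skip the guest
 * memory prefetch.  The helper name and the NOP byte are made up for the
 * example.
 */
#if 0 /* illustrative only */
static VBOXSTRICTRC someCallerExecKnownNop(PVMCPU pVCpu)
{
    static uint8_t const s_abNop[] = { 0x90 };  /* NOP at the current RIP */
    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    return IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pCtx->rip,
                                        s_abNop, sizeof(s_abNop));
}
#endif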
15152
15153
15154VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
15155{
15156 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15157 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15158
15159 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15160 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15161 if (rcStrict == VINF_SUCCESS)
15162 {
15163 rcStrict = iemExecOneInner(pVCpu, false);
15164 if (pcbWritten)
15165 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15166 }
15167
15168#ifdef IN_RC
15169 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15170#endif
15171 return rcStrict;
15172}
15173
15174
15175VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15176 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
15177{
15178 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15179 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15180
15181 VBOXSTRICTRC rcStrict;
15182 if ( cbOpcodeBytes
15183 && pCtx->rip == OpcodeBytesPC)
15184 {
15185 iemInitDecoder(pVCpu, true);
15186#ifdef IEM_WITH_CODE_TLB
15187 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15188 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15189 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15190 pVCpu->iem.s.offCurInstrStart = 0;
15191 pVCpu->iem.s.offInstrNextByte = 0;
15192#else
15193 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15194 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15195#endif
15196 rcStrict = VINF_SUCCESS;
15197 }
15198 else
15199 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15200 if (rcStrict == VINF_SUCCESS)
15201 rcStrict = iemExecOneInner(pVCpu, false);
15202
15203#ifdef IN_RC
15204 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15205#endif
15206 return rcStrict;
15207}
15208
15209
15210/**
15211 * For debugging DISGetParamSize; may come in handy.
15212 *
15213 * @returns Strict VBox status code.
15214 * @param pVCpu The cross context virtual CPU structure of the
15215 * calling EMT.
15216 * @param pCtxCore The context core structure.
15217 * @param OpcodeBytesPC The PC of the opcode bytes.
15218 * @param pvOpcodeBytes Prefetched opcode bytes.
15219 * @param cbOpcodeBytes Number of prefetched bytes.
15220 * @param pcbWritten Where to return the number of bytes written.
15221 * Optional.
15222 */
15223VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15224 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
15225 uint32_t *pcbWritten)
15226{
15227 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15228 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15229
15230 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15231 VBOXSTRICTRC rcStrict;
15232 if ( cbOpcodeBytes
15233 && pCtx->rip == OpcodeBytesPC)
15234 {
15235 iemInitDecoder(pVCpu, true);
15236#ifdef IEM_WITH_CODE_TLB
15237 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15238 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15239 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15240 pVCpu->iem.s.offCurInstrStart = 0;
15241 pVCpu->iem.s.offInstrNextByte = 0;
15242#else
15243 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15244 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15245#endif
15246 rcStrict = VINF_SUCCESS;
15247 }
15248 else
15249 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15250 if (rcStrict == VINF_SUCCESS)
15251 {
15252 rcStrict = iemExecOneInner(pVCpu, false);
15253 if (pcbWritten)
15254 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15255 }
15256
15257#ifdef IN_RC
15258 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15259#endif
15260 return rcStrict;
15261}
15262
15263
15264VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
15265{
15266 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
15267
15268#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15269 /*
15270 * See if there is an interrupt pending in TRPM, inject it if we can.
15271 */
15272 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15273# ifdef IEM_VERIFICATION_MODE_FULL
15274 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15275# endif
15276
15277 /** @todo Maybe someday we can centralize this under CPUMCanInjectInterrupt()? */
15278# if defined(VBOX_WITH_NESTED_HWVIRT)
15279 bool fIntrEnabled = pCtx->hwvirt.fGif;
15280 if (fIntrEnabled)
15281 {
15282 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
15283 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);
15284 else
15285 fIntrEnabled = pCtx->eflags.Bits.u1IF;
15286 }
15287# else
15288 bool fIntrEnabled = pCtx->eflags.Bits.u1IF;
15289# endif
15290 if ( fIntrEnabled
15291 && TRPMHasTrap(pVCpu)
15292 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15293 {
15294 uint8_t u8TrapNo;
15295 TRPMEVENT enmType;
15296 RTGCUINT uErrCode;
15297 RTGCPTR uCr2;
15298 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15299 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15300 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15301 TRPMResetTrap(pVCpu);
15302 }
15303
15304 /*
15305 * Log the state.
15306 */
15307# ifdef LOG_ENABLED
15308 iemLogCurInstr(pVCpu, pCtx, true);
15309# endif
15310
15311 /*
15312 * Do the decoding and emulation.
15313 */
15314 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15315 if (rcStrict == VINF_SUCCESS)
15316 rcStrict = iemExecOneInner(pVCpu, true);
15317
15318 /*
15319 * Assert some sanity.
15320 */
15321 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
15322
15323 /*
15324 * Log and return.
15325 */
15326 if (rcStrict != VINF_SUCCESS)
15327 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15328 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15329 if (pcInstructions)
15330 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15331 return rcStrict;
15332
15333#else /* Not verification mode */
15334
15335 /*
15336 * See if there is an interrupt pending in TRPM, inject it if we can.
15337 */
15338 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15339# ifdef IEM_VERIFICATION_MODE_FULL
15340 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15341# endif
15342
15343 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
15344# if defined(VBOX_WITH_NESTED_HWVIRT)
15345 bool fIntrEnabled = pCtx->hwvirt.fGif;
15346 if (fIntrEnabled)
15347 {
15348 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
15349 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);
15350 else
15351 fIntrEnabled = pCtx->eflags.Bits.u1IF;
15352 }
15353# else
15354 bool fIntrEnabled = pCtx->eflags.Bits.u1IF;
15355# endif
15356 if ( fIntrEnabled
15357 && TRPMHasTrap(pVCpu)
15358 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15359 {
15360 uint8_t u8TrapNo;
15361 TRPMEVENT enmType;
15362 RTGCUINT uErrCode;
15363 RTGCPTR uCr2;
15364 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15365 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15366 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15367 TRPMResetTrap(pVCpu);
15368 }
15369
15370 /*
15371 * Initial decoder init w/ prefetch, then setup setjmp.
15372 */
15373 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15374 if (rcStrict == VINF_SUCCESS)
15375 {
15376# ifdef IEM_WITH_SETJMP
15377 jmp_buf JmpBuf;
15378 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
15379 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
15380 pVCpu->iem.s.cActiveMappings = 0;
15381 if ((rcStrict = setjmp(JmpBuf)) == 0)
15382# endif
15383 {
15384 /*
15385 * The run loop. We limit ourselves to 4096 instructions right now.
15386 */
15387 PVM pVM = pVCpu->CTX_SUFF(pVM);
15388 uint32_t cInstr = 4096;
15389 for (;;)
15390 {
15391 /*
15392 * Log the state.
15393 */
15394# ifdef LOG_ENABLED
15395 iemLogCurInstr(pVCpu, pCtx, true);
15396# endif
15397
15398 /*
15399 * Do the decoding and emulation.
15400 */
15401 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
15402 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
15403 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15404 {
15405 Assert(pVCpu->iem.s.cActiveMappings == 0);
15406 pVCpu->iem.s.cInstructions++;
15407 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
15408 {
15409 uint32_t fCpu = pVCpu->fLocalForcedActions
15410 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
15411 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
15412 | VMCPU_FF_TLB_FLUSH
15413# ifdef VBOX_WITH_RAW_MODE
15414 | VMCPU_FF_TRPM_SYNC_IDT
15415 | VMCPU_FF_SELM_SYNC_TSS
15416 | VMCPU_FF_SELM_SYNC_GDT
15417 | VMCPU_FF_SELM_SYNC_LDT
15418# endif
15419 | VMCPU_FF_INHIBIT_INTERRUPTS
15420 | VMCPU_FF_BLOCK_NMIS
15421 | VMCPU_FF_UNHALT ));
15422
15423 if (RT_LIKELY( ( !fCpu
15424 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
15425 && !pCtx->rflags.Bits.u1IF) )
15426 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
15427 {
15428 if (cInstr-- > 0)
15429 {
15430 Assert(pVCpu->iem.s.cActiveMappings == 0);
15431 iemReInitDecoder(pVCpu);
15432 continue;
15433 }
15434 }
15435 }
15436 Assert(pVCpu->iem.s.cActiveMappings == 0);
15437 }
15438 else if (pVCpu->iem.s.cActiveMappings > 0)
15439 iemMemRollback(pVCpu);
15440 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15441 break;
15442 }
15443 }
15444# ifdef IEM_WITH_SETJMP
15445 else
15446 {
15447 if (pVCpu->iem.s.cActiveMappings > 0)
15448 iemMemRollback(pVCpu);
15449 pVCpu->iem.s.cLongJumps++;
15450# ifdef VBOX_WITH_NESTED_HWVIRT
15451 /*
15452 * When a nested-guest causes an exception intercept when fetching memory
15453 * (e.g. IEM_MC_FETCH_MEM_U16) as part of instruction execution, we need this
15454 * to fix-up VINF_SVM_VMEXIT on the longjmp way out, otherwise we will guru.
15455 */
15456 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15457# endif
15458 }
15459 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
15460# endif
15461
15462 /*
15463 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
15464 */
15465 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
15466 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
15467# if defined(IEM_VERIFICATION_MODE_FULL)
15468 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
15469 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
15470 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
15471 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
15472# endif
15473 }
15474# ifdef VBOX_WITH_NESTED_HWVIRT
15475 else
15476 {
15477 /*
15478 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
15479 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
15480 */
15481 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15482 }
15483# endif
15484
15485 /*
15486 * Maybe re-enter raw-mode and log.
15487 */
15488# ifdef IN_RC
15489 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15490# endif
15491 if (rcStrict != VINF_SUCCESS)
15492 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15493 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15494 if (pcInstructions)
15495 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15496 return rcStrict;
15497#endif /* Not verification mode */
15498}
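
/*
 * Illustrative sketch, not part of the original source: running a batch of
 * instructions and logging how many were retired.  The helper name is a
 * placeholder.
 */
#if 0 /* illustrative only */
static VBOXSTRICTRC someCallerRunBatch(PVMCPU pVCpu)
{
    uint32_t     cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
    Log(("Executed %u instructions, rc=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif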
15499
15500
15501
15502/**
15503 * Injects a trap, fault, abort, software interrupt or external interrupt.
15504 *
15505 * The parameter list matches TRPMQueryTrapAll pretty closely.
15506 *
15507 * @returns Strict VBox status code.
15508 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15509 * @param u8TrapNo The trap number.
15510 * @param enmType What type is it (trap/fault/abort), software
15511 * interrupt or hardware interrupt.
15512 * @param uErrCode The error code if applicable.
15513 * @param uCr2 The CR2 value if applicable.
15514 * @param cbInstr The instruction length (only relevant for
15515 * software interrupts).
15516 */
15517VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
15518 uint8_t cbInstr)
15519{
15520 iemInitDecoder(pVCpu, false);
15521#ifdef DBGFTRACE_ENABLED
15522 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
15523 u8TrapNo, enmType, uErrCode, uCr2);
15524#endif
15525
15526 uint32_t fFlags;
15527 switch (enmType)
15528 {
15529 case TRPM_HARDWARE_INT:
15530 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
15531 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
15532 uErrCode = uCr2 = 0;
15533 break;
15534
15535 case TRPM_SOFTWARE_INT:
15536 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
15537 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
15538 uErrCode = uCr2 = 0;
15539 break;
15540
15541 case TRPM_TRAP:
15542 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
15543 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
15544 if (u8TrapNo == X86_XCPT_PF)
15545 fFlags |= IEM_XCPT_FLAGS_CR2;
15546 switch (u8TrapNo)
15547 {
15548 case X86_XCPT_DF:
15549 case X86_XCPT_TS:
15550 case X86_XCPT_NP:
15551 case X86_XCPT_SS:
15552 case X86_XCPT_PF:
15553 case X86_XCPT_AC:
15554 fFlags |= IEM_XCPT_FLAGS_ERR;
15555 break;
15556
15557 case X86_XCPT_NMI:
15558 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
15559 break;
15560 }
15561 break;
15562
15563 IEM_NOT_REACHED_DEFAULT_CASE_RET();
15564 }
15565
15566 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
15567}
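
/*
 * Illustrative sketch, not part of the original source: injecting a pending
 * external interrupt using the parameter conventions documented above.  The
 * helper name is a placeholder.
 */
#if 0 /* illustrative only */
static VBOXSTRICTRC someCallerInjectExtInt(PVMCPU pVCpu, uint8_t u8Vector)
{
    /* For TRPM_HARDWARE_INT the error code, CR2 and instruction length are ignored. */
    return IEMInjectTrap(pVCpu, u8Vector, TRPM_HARDWARE_INT, 0 /*uErrCode*/, 0 /*uCr2*/, 0 /*cbInstr*/);
}
#endif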
15568
15569
15570/**
15571 * Injects the active TRPM event.
15572 *
15573 * @returns Strict VBox status code.
15574 * @param pVCpu The cross context virtual CPU structure.
15575 */
15576VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
15577{
15578#ifndef IEM_IMPLEMENTS_TASKSWITCH
15579 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
15580#else
15581 uint8_t u8TrapNo;
15582 TRPMEVENT enmType;
15583 RTGCUINT uErrCode;
15584 RTGCUINTPTR uCr2;
15585 uint8_t cbInstr;
15586 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
15587 if (RT_FAILURE(rc))
15588 return rc;
15589
15590 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
15591
15592 /** @todo Are there any other codes that imply the event was successfully
15593 * delivered to the guest? See @bugref{6607}. */
15594 if ( rcStrict == VINF_SUCCESS
15595 || rcStrict == VINF_IEM_RAISED_XCPT)
15596 {
15597 TRPMResetTrap(pVCpu);
15598 }
15599 return rcStrict;
15600#endif
15601}
15602
15603
15604VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
15605{
15606 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15607 return VERR_NOT_IMPLEMENTED;
15608}
15609
15610
15611VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
15612{
15613 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15614 return VERR_NOT_IMPLEMENTED;
15615}
15616
15617
15618#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
15619/**
15620 * Executes an IRET instruction with the default operand size.
15621 *
15622 * This is for PATM.
15623 *
15624 * @returns VBox status code.
15625 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15626 * @param pCtxCore The register frame.
15627 */
15628VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
15629{
15630 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15631
15632 iemCtxCoreToCtx(pCtx, pCtxCore);
15633 iemInitDecoder(pVCpu);
15634 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
15635 if (rcStrict == VINF_SUCCESS)
15636 iemCtxToCtxCore(pCtxCore, pCtx);
15637 else
15638 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15639 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15640 return rcStrict;
15641}
15642#endif
15643
15644
15645/**
15646 * Macro used by the IEMExec* method to check the given instruction length.
15647 *
15648 * Will return on failure!
15649 *
15650 * @param a_cbInstr The given instruction length.
15651 * @param a_cbMin The minimum length.
15652 */
15653#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
15654 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
15655 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
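
/*
 * Illustrative note, not part of the original source: the single unsigned
 * comparison above accepts lengths in the range [a_cbMin, 15].  For example
 * with a_cbMin = 2, cbInstr = 1 wraps to a huge unsigned value and fails,
 * while cbInstr = 2..15 passes and cbInstr = 16 fails again.
 */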
15656
15657
15658/**
15659 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
15660 *
15661 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
15662 *
15663 * @returns Fiddled strict VBox status code, ready to return to non-IEM caller.
15664 * @param pVCpu The cross context virtual CPU structure of the calling thread.
15665 * @param rcStrict The status code to fiddle.
15666 */
15667DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15668{
15669 iemUninitExec(pVCpu);
15670#ifdef IN_RC
15671 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
15672 iemExecStatusCodeFiddling(pVCpu, rcStrict));
15673#else
15674 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15675#endif
15676}
15677
15678
15679/**
15680 * Interface for HM and EM for executing string I/O OUT (write) instructions.
15681 *
15682 * This API ASSUMES that the caller has already verified that the guest code is
15683 * allowed to access the I/O port. (The I/O port is in the DX register in the
15684 * guest state.)
15685 *
15686 * @returns Strict VBox status code.
15687 * @param pVCpu The cross context virtual CPU structure.
15688 * @param cbValue The size of the I/O port access (1, 2, or 4).
15689 * @param enmAddrMode The addressing mode.
15690 * @param fRepPrefix Indicates whether a repeat prefix is used
15691 * (doesn't matter which for this instruction).
15692 * @param cbInstr The instruction length in bytes.
15693 * @param iEffSeg The effective segment register number (X86_SREG_XXX).
15694 * @param fIoChecked Whether the access to the I/O port has been
15695 * checked or not. It's typically checked in the
15696 * HM scenario.
15697 */
15698VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15699 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
15700{
15701 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
15702 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15703
15704 /*
15705 * State init.
15706 */
15707 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15708
15709 /*
15710 * Switch orgy for getting to the right handler.
15711 */
15712 VBOXSTRICTRC rcStrict;
15713 if (fRepPrefix)
15714 {
15715 switch (enmAddrMode)
15716 {
15717 case IEMMODE_16BIT:
15718 switch (cbValue)
15719 {
15720 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15721 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15722 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15723 default:
15724 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15725 }
15726 break;
15727
15728 case IEMMODE_32BIT:
15729 switch (cbValue)
15730 {
15731 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15732 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15733 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15734 default:
15735 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15736 }
15737 break;
15738
15739 case IEMMODE_64BIT:
15740 switch (cbValue)
15741 {
15742 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15743 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15744 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15745 default:
15746 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15747 }
15748 break;
15749
15750 default:
15751 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15752 }
15753 }
15754 else
15755 {
15756 switch (enmAddrMode)
15757 {
15758 case IEMMODE_16BIT:
15759 switch (cbValue)
15760 {
15761 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15762 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15763 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15764 default:
15765 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15766 }
15767 break;
15768
15769 case IEMMODE_32BIT:
15770 switch (cbValue)
15771 {
15772 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15773 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15774 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15775 default:
15776 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15777 }
15778 break;
15779
15780 case IEMMODE_64BIT:
15781 switch (cbValue)
15782 {
15783 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15784 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15785 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15786 default:
15787 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15788 }
15789 break;
15790
15791 default:
15792 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15793 }
15794 }
15795
15796 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15797}
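
/*
 * Illustrative sketch, not part of the original source: how an HM I/O-exit
 * handler might hand a "rep outsb" back to IEM after performing the port
 * permission checks itself.  The instruction length, address mode and segment
 * are example values; the helper name is a placeholder.
 */
#if 0 /* illustrative only */
static VBOXSTRICTRC someExitHandlerRepOutsb(PVMCPU pVCpu)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                2 /*cbInstr: 0xf3 0x6e*/, X86_SREG_DS, true /*fIoChecked*/);
}
#endif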
15798
15799
15800/**
15801 * Interface for HM and EM for executing string I/O IN (read) instructions.
15802 *
15803 * This API ASSUMES that the caller has already verified that the guest code is
15804 * allowed to access the I/O port. (The I/O port is in the DX register in the
15805 * guest state.)
15806 *
15807 * @returns Strict VBox status code.
15808 * @param pVCpu The cross context virtual CPU structure.
15809 * @param cbValue The size of the I/O port access (1, 2, or 4).
15810 * @param enmAddrMode The addressing mode.
15811 * @param fRepPrefix Indicates whether a repeat prefix is used
15812 * (doesn't matter which for this instruction).
15813 * @param cbInstr The instruction length in bytes.
15814 * @param fIoChecked Whether the access to the I/O port has been
15815 * checked or not. It's typically checked in the
15816 * HM scenario.
15817 */
15818VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15819 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15820{
15821 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15822
15823 /*
15824 * State init.
15825 */
15826 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15827
15828 /*
15829 * Switch orgy for getting to the right handler.
15830 */
15831 VBOXSTRICTRC rcStrict;
15832 if (fRepPrefix)
15833 {
15834 switch (enmAddrMode)
15835 {
15836 case IEMMODE_16BIT:
15837 switch (cbValue)
15838 {
15839 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15840 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15841 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15842 default:
15843 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15844 }
15845 break;
15846
15847 case IEMMODE_32BIT:
15848 switch (cbValue)
15849 {
15850 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15851 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15852 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15853 default:
15854 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15855 }
15856 break;
15857
15858 case IEMMODE_64BIT:
15859 switch (cbValue)
15860 {
15861 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15862 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15863 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15864 default:
15865 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15866 }
15867 break;
15868
15869 default:
15870 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15871 }
15872 }
15873 else
15874 {
15875 switch (enmAddrMode)
15876 {
15877 case IEMMODE_16BIT:
15878 switch (cbValue)
15879 {
15880 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15881 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15882 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15883 default:
15884 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15885 }
15886 break;
15887
15888 case IEMMODE_32BIT:
15889 switch (cbValue)
15890 {
15891 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15892 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15893 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15894 default:
15895 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15896 }
15897 break;
15898
15899 case IEMMODE_64BIT:
15900 switch (cbValue)
15901 {
15902 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15903 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15904 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15905 default:
15906 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15907 }
15908 break;
15909
15910 default:
15911 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15912 }
15913 }
15914
15915 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15916}
15917
15918
15919/**
15920 * Interface for rawmode to execute an OUT instruction.
15921 *
15922 * @returns Strict VBox status code.
15923 * @param pVCpu The cross context virtual CPU structure.
15924 * @param cbInstr The instruction length in bytes.
15925 * @param u16Port The port to write to.
15926 * @param cbReg The register size.
15927 *
15928 * @remarks In ring-0 not all of the state needs to be synced in.
15929 */
15930VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15931{
15932 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15933 Assert(cbReg <= 4 && cbReg != 3);
15934
15935 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15936 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
15937 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15938}
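
/*
 * Illustrative sketch, not part of the original source: emulating a decoded
 * one byte "out dx, al" (opcode 0xee) on behalf of the caller.  The helper
 * name is a placeholder.
 */
#if 0 /* illustrative only */
static VBOXSTRICTRC someCallerEmulateOutDxAl(PVMCPU pVCpu, uint16_t u16Port)
{
    return IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, u16Port, 1 /*cbReg*/);
}
#endif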
15939
15940
15941/**
15942 * Interface for rawmode to execute an IN instruction.
15943 *
15944 * @returns Strict VBox status code.
15945 * @param pVCpu The cross context virtual CPU structure.
15946 * @param cbInstr The instruction length in bytes.
15947 * @param u16Port The port to read.
15948 * @param cbReg The register size.
15949 */
15950VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15951{
15952 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15953 Assert(cbReg <= 4 && cbReg != 3);
15954
15955 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15956 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
15957 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15958}
15959
15960
15961/**
15962 * Interface for HM and EM to write to a CRx register.
15963 *
15964 * @returns Strict VBox status code.
15965 * @param pVCpu The cross context virtual CPU structure.
15966 * @param cbInstr The instruction length in bytes.
15967 * @param iCrReg The control register number (destination).
15968 * @param iGReg The general purpose register number (source).
15969 *
15970 * @remarks In ring-0 not all of the state needs to be synced in.
15971 */
15972VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15973{
15974 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15975 Assert(iCrReg < 16);
15976 Assert(iGReg < 16);
15977
15978 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15979 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15980 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15981}
15982
15983
15984/**
15985 * Interface for HM and EM to read from a CRx register.
15986 *
15987 * @returns Strict VBox status code.
15988 * @param pVCpu The cross context virtual CPU structure.
15989 * @param cbInstr The instruction length in bytes.
15990 * @param iGReg The general purpose register number (destination).
15991 * @param iCrReg The control register number (source).
15992 *
15993 * @remarks In ring-0 not all of the state needs to be synced in.
15994 */
15995VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15996{
15997 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15998 Assert(iCrReg < 16);
15999 Assert(iGReg < 16);
16000
16001 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16002 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
16003 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16004}
16005
16006
16007/**
16008 * Interface for HM and EM to clear the CR0[TS] bit.
16009 *
16010 * @returns Strict VBox status code.
16011 * @param pVCpu The cross context virtual CPU structure.
16012 * @param cbInstr The instruction length in bytes.
16013 *
16014 * @remarks In ring-0 not all of the state needs to be synced in.
16015 */
16016VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
16017{
16018 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
16019
16020 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16021 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
16022 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16023}
16024
16025
16026/**
16027 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
16028 *
16029 * @returns Strict VBox status code.
16030 * @param pVCpu The cross context virtual CPU structure.
16031 * @param cbInstr The instruction length in bytes.
16032 * @param uValue The value to load into CR0.
16033 *
16034 * @remarks In ring-0 not all of the state needs to be synced in.
16035 */
16036VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
16037{
16038 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16039
16040 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16041 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
16042 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16043}
16044
16045
16046/**
16047 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
16048 *
16049 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
16050 *
16051 * @returns Strict VBox status code.
16052 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16053 * @param cbInstr The instruction length in bytes.
16054 * @remarks In ring-0 not all of the state needs to be synced in.
16055 * @thread EMT(pVCpu)
16056 */
16057VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
16058{
16059 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16060
16061 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16062 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
16063 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16064}
16065
16066
16067/**
16068 * Interface for HM and EM to emulate the INVLPG instruction.
16069 *
 * @returns Strict VBox status code.
16070 * @param pVCpu The cross context virtual CPU structure.
16071 * @param cbInstr The instruction length in bytes.
16072 * @param GCPtrPage The effective address of the page to invalidate.
16073 *
16074 * @remarks In ring-0 not all of the state needs to be synced in.
16075 */
16076VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
16077{
16078 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16079
16080 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16081 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
16082 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16083}
16084
16085
16086/**
16087 * Interface for HM and EM to emulate the INVPCID instruction.
16088 *
 * @returns Strict VBox status code.
16089 * @param pVCpu The cross context virtual CPU structure.
16090 * @param cbInstr The instruction length in bytes.
16091 * @param uType The invalidation type.
16092 * @param GCPtrInvpcidDesc The effective address of the INVPCID descriptor.
16093 *
16094 * @remarks In ring-0 not all of the state needs to be synced in.
16095 */
16096VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPU pVCpu, uint8_t cbInstr, uint8_t uType, RTGCPTR GCPtrInvpcidDesc)
16097{
16098 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
16099
16100 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16101 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_invpcid, uType, GCPtrInvpcidDesc);
16102 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16103}
16104
16105
16106/**
16107 * Checks if IEM is in the process of delivering an event (interrupt or
16108 * exception).
16109 *
16110 * @returns true if we're in the process of raising an interrupt or exception,
16111 * false otherwise.
16112 * @param pVCpu The cross context virtual CPU structure.
16113 * @param puVector Where to store the vector associated with the
16114 * currently delivered event, optional.
16115 * @param pfFlags Where to store the event delivery flags (see
16116 * IEM_XCPT_FLAGS_XXX), optional.
16117 * @param puErr Where to store the error code associated with the
16118 * event, optional.
16119 * @param puCr2 Where to store the CR2 associated with the event,
16120 * optional.
16121 * @remarks The caller should check the flags to determine if the error code and
16122 * CR2 are valid for the event.
16123 */
16124VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
16125{
16126 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
16127 if (fRaisingXcpt)
16128 {
16129 if (puVector)
16130 *puVector = pVCpu->iem.s.uCurXcpt;
16131 if (pfFlags)
16132 *pfFlags = pVCpu->iem.s.fCurXcpt;
16133 if (puErr)
16134 *puErr = pVCpu->iem.s.uCurXcptErr;
16135 if (puCr2)
16136 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
16137 }
16138 return fRaisingXcpt;
16139}
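
/*
 * Illustrative sketch, not part of the original source: querying whether IEM
 * is in the middle of delivering an event, e.g. when deciding how to report a
 * nested fault.  The helper name is a placeholder.
 */
#if 0 /* illustrative only */
static void someCallerCheckPendingXcpt(PVMCPU pVCpu)
{
    uint8_t  uVector;
    uint32_t fFlags, uErr;
    uint64_t uCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("Delivering vector %#x fFlags=%#x uErr=%#x uCr2=%RX64\n", uVector, fFlags, uErr, uCr2));
}
#endif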
16140
16141#ifdef VBOX_WITH_NESTED_HWVIRT
16142/**
16143 * Interface for HM and EM to emulate the CLGI instruction.
16144 *
16145 * @returns Strict VBox status code.
16146 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16147 * @param cbInstr The instruction length in bytes.
16148 * @thread EMT(pVCpu)
16149 */
16150VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
16151{
16152 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16153
16154 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16155 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
16156 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16157}
16158
16159
16160/**
16161 * Interface for HM and EM to emulate the STGI instruction.
16162 *
16163 * @returns Strict VBox status code.
16164 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16165 * @param cbInstr The instruction length in bytes.
16166 * @thread EMT(pVCpu)
16167 */
16168VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
16169{
16170 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16171
16172 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16173 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
16174 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16175}
16176
16177
16178/**
16179 * Interface for HM and EM to emulate the VMLOAD instruction.
16180 *
16181 * @returns Strict VBox status code.
16182 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16183 * @param cbInstr The instruction length in bytes.
16184 * @thread EMT(pVCpu)
16185 */
16186VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
16187{
16188 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16189
16190 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16191 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
16192 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16193}
16194
16195
16196/**
16197 * Interface for HM and EM to emulate the VMSAVE instruction.
16198 *
16199 * @returns Strict VBox status code.
16200 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16201 * @param cbInstr The instruction length in bytes.
16202 * @thread EMT(pVCpu)
16203 */
16204VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
16205{
16206 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16207
16208 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16209 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
16210 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16211}
16212
16213
16214/**
16215 * Interface for HM and EM to emulate the INVLPGA instruction.
16216 *
16217 * @returns Strict VBox status code.
16218 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16219 * @param cbInstr The instruction length in bytes.
16220 * @thread EMT(pVCpu)
16221 */
16222VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
16223{
16224 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16225
16226 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16227 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
16228 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16229}
16230
16231
16232/**
16233 * Interface for HM and EM to emulate the VMRUN instruction.
16234 *
16235 * @returns Strict VBox status code.
16236 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16237 * @param cbInstr The instruction length in bytes.
16238 * @thread EMT(pVCpu)
16239 */
16240VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
16241{
16242 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16243
16244 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16245 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
16246 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16247}
16248
16249
16250/**
16251 * Interface for HM and EM to emulate \#VMEXIT.
16252 *
16253 * @returns Strict VBox status code.
16254 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16255 * @param uExitCode The exit code.
16256 * @param uExitInfo1 The exit info. 1 field.
16257 * @param uExitInfo2 The exit info. 2 field.
16258 * @thread EMT(pVCpu)
16259 */
16260VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
16261{
16262 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, IEM_GET_CTX(pVCpu), uExitCode, uExitInfo1, uExitInfo2);
16263 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16264}
16265#endif /* VBOX_WITH_NESTED_HWVIRT */
16266
16267#ifdef IN_RING3
16268
16269/**
16270 * Handles the unlikely and probably fatal merge cases.
16271 *
16272 * @returns Merged status code.
16273 * @param rcStrict Current EM status code.
16274 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16275 * with @a rcStrict.
16276 * @param iMemMap The memory mapping index. For error reporting only.
16277 * @param pVCpu The cross context virtual CPU structure of the calling
16278 * thread, for error reporting only.
16279 */
16280DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
16281 unsigned iMemMap, PVMCPU pVCpu)
16282{
16283 if (RT_FAILURE_NP(rcStrict))
16284 return rcStrict;
16285
16286 if (RT_FAILURE_NP(rcStrictCommit))
16287 return rcStrictCommit;
16288
16289 if (rcStrict == rcStrictCommit)
16290 return rcStrictCommit;
16291
16292 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
16293 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
16294 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
16295 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
16296 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
16297 return VERR_IOM_FF_STATUS_IPE;
16298}
16299
16300
16301/**
16302 * Helper for IOMR3ProcessForceFlag.
16303 *
16304 * @returns Merged status code.
16305 * @param rcStrict Current EM status code.
16306 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16307 * with @a rcStrict.
16308 * @param iMemMap The memory mapping index. For error reporting only.
16309 * @param pVCpu The cross context virtual CPU structure of the calling
16310 * thread, for error reporting only.
16311 */
16312DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
16313{
16314 /* Simple. */
16315 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
16316 return rcStrictCommit;
16317
16318 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
16319 return rcStrict;
16320
16321 /* EM scheduling status codes. */
16322 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
16323 && rcStrict <= VINF_EM_LAST))
16324 {
16325 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
16326 && rcStrictCommit <= VINF_EM_LAST))
16327 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
16328 }
16329
16330 /* Unlikely */
16331 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
16332}
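
/*
 * Illustrative note, not part of the original source: in practice this means
 * a VINF_SUCCESS or VINF_EM_RAW_TO_R3 EM status simply adopts the commit
 * status, a successful commit keeps the EM status, two EM scheduling codes
 * resolve to the numerically lower (higher priority) one, and anything else
 * is handed to the slow path above.
 */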
16333
16334
16335/**
16336 * Called by force-flag handling code when VMCPU_FF_IEM is set.
16337 *
16338 * @returns Merge between @a rcStrict and what the commit operation returned.
16339 * @param pVM The cross context VM structure.
16340 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16341 * @param rcStrict The status code returned by ring-0 or raw-mode.
16342 */
16343VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
16344{
16345 /*
16346 * Reset the pending commit.
16347 */
16348 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
16349 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
16350 ("%#x %#x %#x\n",
16351 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16352 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
16353
16354 /*
16355 * Commit the pending bounce buffers (usually just one).
16356 */
16357 unsigned cBufs = 0;
16358 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
16359 while (iMemMap-- > 0)
16360 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
16361 {
16362 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
16363 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
16364 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
16365
16366 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
16367 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
16368 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
16369
16370 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
16371 {
16372 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
16373 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16374 pbBuf,
16375 cbFirst,
16376 PGMACCESSORIGIN_IEM);
16377 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16378 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16379 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16380 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16381 }
16382
16383 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16384 {
16385 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16386 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16387 pbBuf + cbFirst,
16388 cbSecond,
16389 PGMACCESSORIGIN_IEM);
16390 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16391 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16392 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16393 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16394 }
16395 cBufs++;
16396 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16397 }
16398
16399 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16400 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16401 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16402 pVCpu->iem.s.cActiveMappings = 0;
16403 return rcStrict;
16404}
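
/*
 * Illustrative sketch, not part of the original source: how a ring-3 loop
 * might hand a pending IEM commit to the helper above once it notices
 * VMCPU_FF_IEM.  The helper name is a placeholder.
 */
#if 0 /* illustrative only */
static VBOXSTRICTRC someR3LoopHandleIemForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict); /* commits pending bounce buffer writes */
    return rcStrict;
}
#endif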
16405
16406#endif /* IN_RING3 */
16407