source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 69046

Last change on this file was in r69046, checked in by vboxsync on 2017-10-11:

Global: replace fall-through comments with RT_FALL_THRU().
bugref:8192: gcc warnings

1/* $Id: IEMAll.cpp 69046 2017-10-11 16:11:23Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, it is thought
36 * to conflict with the speed goal, as the disassembler chews on things a bit
37 * too much and leaves us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#ifdef VBOX_WITH_NESTED_HWVIRT
105# include <VBox/vmm/em.h>
106# include <VBox/vmm/hm_svm.h>
107#endif
108#include <VBox/vmm/tm.h>
109#include <VBox/vmm/dbgf.h>
110#include <VBox/vmm/dbgftrace.h>
111#ifdef VBOX_WITH_RAW_MODE_NOT_R0
112# include <VBox/vmm/patm.h>
113# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
114# include <VBox/vmm/csam.h>
115# endif
116#endif
117#include "IEMInternal.h"
118#ifdef IEM_VERIFICATION_MODE_FULL
119# include <VBox/vmm/rem.h>
120# include <VBox/vmm/mm.h>
121#endif
122#include <VBox/vmm/vm.h>
123#include <VBox/log.h>
124#include <VBox/err.h>
125#include <VBox/param.h>
126#include <VBox/dis.h>
127#include <VBox/disopcode.h>
128#include <iprt/assert.h>
129#include <iprt/string.h>
130#include <iprt/x86.h>
131
132
133/*********************************************************************************************************************************
134* Structures and Typedefs *
135*********************************************************************************************************************************/
136/** @typedef PFNIEMOP
137 * Pointer to an opcode decoder function.
138 */
139
140/** @def FNIEMOP_DEF
141 * Define an opcode decoder function.
142 *
143 * We're using macros for this so that adding and removing parameters as well as
144 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
145 *
146 * @param a_Name The function name.
147 */
148
149/** @typedef PFNIEMOPRM
150 * Pointer to an opcode decoder function with RM byte.
151 */
152
153/** @def FNIEMOPRM_DEF
154 * Define an opcode decoder function with RM byte.
155 *
156 * We're using macros for this so that adding and removing parameters as well as
157 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
158 *
159 * @param a_Name The function name.
160 */
161
162#if defined(__GNUC__) && defined(RT_ARCH_X86)
163typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
164typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
165# define FNIEMOP_DEF(a_Name) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
167# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
168 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
169# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
170 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
171
172#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
173typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
174typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
175# define FNIEMOP_DEF(a_Name) \
176 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
177# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
178 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
179# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
180 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
181
182#elif defined(__GNUC__)
183typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
184typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
185# define FNIEMOP_DEF(a_Name) \
186 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
187# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
188 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
189# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
190 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
191
192#else
193typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
194typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
195# define FNIEMOP_DEF(a_Name) \
196 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
197# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
198 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
199# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
200 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
201
202#endif
203#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
204
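/*
 * Illustration (not part of the build; the opcode name below is made up):
 * FNIEMOP_DEF supplies the function header with the calling convention and
 * nothrow attributes selected above, while FNIEMOP_CALL (defined further
 * down) invokes such a function at a decode site where pVCpu is in scope.
 *
 *     FNIEMOP_DEF(iemOp_ExampleStub)
 *     {
 *         IEM_RETURN_ASPECT_NOT_IMPLEMENTED();   // stub body; real decoders use the IEM_MC machinery
 *     }
 *
 *     // ... later, from a decoder table or another decoder function:
 *     return FNIEMOP_CALL(iemOp_ExampleStub);    // expands to (iemOp_ExampleStub)(pVCpu)
 */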
205
206/**
207 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
208 */
209typedef union IEMSELDESC
210{
211 /** The legacy view. */
212 X86DESC Legacy;
213 /** The long mode view. */
214 X86DESC64 Long;
215} IEMSELDESC;
216/** Pointer to a selector descriptor table entry. */
217typedef IEMSELDESC *PIEMSELDESC;
218
219/**
220 * CPU exception classes.
221 */
222typedef enum IEMXCPTCLASS
223{
224 IEMXCPTCLASS_BENIGN,
225 IEMXCPTCLASS_CONTRIBUTORY,
226 IEMXCPTCLASS_PAGE_FAULT,
227 IEMXCPTCLASS_DOUBLE_FAULT
228} IEMXCPTCLASS;
229
230
231/*********************************************************************************************************************************
232* Defined Constants And Macros *
233*********************************************************************************************************************************/
234/** @def IEM_WITH_SETJMP
235 * Enables alternative status code handling using setjmps.
236 *
237 * This adds a bit of expense via the setjmp() call since it saves all the
238 * non-volatile registers. However, it eliminates return code checks and allows
239 * for more optimal return value passing (return regs instead of stack buffer).
240 */
241#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
242# define IEM_WITH_SETJMP
243#endif
244
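/*
 * Rough sketch of what IEM_WITH_SETJMP changes (simplified; the Jmp-suffixed
 * helper shown is illustrative of the pattern used by the fetch routines
 * later in this file):
 *
 *     // Status-code style: every memory access returns a VBOXSTRICTRC that
 *     // the caller must check and propagate.
 *     uint32_t     u32Value;
 *     VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &u32Value, iSegReg, GCPtrMem);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *
 *     // setjmp style: the value comes back directly (in a register) and
 *     // faults are raised by longjmp'ing back to the setjmp frame, e.g. via
 *     // iemRaisePageFaultJmp(), so no per-access status checking is needed.
 *     uint32_t u32Value2 = iemMemFetchDataU32Jmp(pVCpu, iSegReg, GCPtrMem);
 */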
245/** Temporary hack to disable the double execution. Will be removed in favor
246 * of a dedicated execution mode in EM. */
247//#define IEM_VERIFICATION_MODE_NO_REM
248
249/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
250 * due to GCC lacking knowledge about the value range of a switch. */
251#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
252
253/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
254#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
255
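/*
 * Typical use (sketch): close a fully enumerated switch so the compiler
 * cannot claim we fall off the end or leave cbValue uninitialized:
 *
 *     uint8_t cbValue;
 *     switch (pVCpu->iem.s.enmEffOpSize)
 *     {
 *         case IEMMODE_16BIT: cbValue = 2; break;
 *         case IEMMODE_32BIT: cbValue = 4; break;
 *         case IEMMODE_64BIT: cbValue = 8; break;
 *         IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *     }
 */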
256/**
257 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
258 * occasion.
259 */
260#ifdef LOG_ENABLED
261# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
262 do { \
263 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
264 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
265 } while (0)
266#else
267# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
268 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
269#endif
270
271/**
272 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
273 * occasion using the supplied logger statement.
274 *
275 * @param a_LoggerArgs What to log on failure.
276 */
277#ifdef LOG_ENABLED
278# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
279 do { \
280 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
281 /*LogFunc(a_LoggerArgs);*/ \
282 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
283 } while (0)
284#else
285# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
286 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
287#endif
288
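/*
 * Example use (sketch; the condition and the ModR/M byte are hypothetical):
 *
 *     if (fUnsupportedEncoding)
 *         IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("unsupported encoding, bRm=%#x\n", bRm));
 */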
289/**
290 * Call an opcode decoder function.
291 *
292 * We're using macros for this so that adding and removing parameters can be
293 * done as we please. See FNIEMOP_DEF.
294 */
295#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
296
297/**
298 * Call a common opcode decoder function taking one extra argument.
299 *
300 * We're using macros for this so that adding and removing parameters can be
301 * done as we please. See FNIEMOP_DEF_1.
302 */
303#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
304
305/**
306 * Call a common opcode decoder function taking two extra arguments.
307 *
308 * We're using macros for this so that adding and removing parameters can be
309 * done as we please. See FNIEMOP_DEF_2.
310 */
311#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
312
313/**
314 * Check if we're currently executing in real or virtual 8086 mode.
315 *
316 * @returns @c true if it is, @c false if not.
317 * @param a_pVCpu The IEM state of the current CPU.
318 */
319#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
320
321/**
322 * Check if we're currently executing in virtual 8086 mode.
323 *
324 * @returns @c true if it is, @c false if not.
325 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
326 */
327#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
328
329/**
330 * Check if we're currently executing in long mode.
331 *
332 * @returns @c true if it is, @c false if not.
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
336
337/**
338 * Check if we're currently executing in real mode.
339 *
340 * @returns @c true if it is, @c false if not.
341 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
342 */
343#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
344
345/**
346 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
347 * @returns PCCPUMFEATURES
348 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
349 */
350#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
351
352/**
353 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
354 * @returns PCCPUMFEATURES
355 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
356 */
357#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
358
359/**
360 * Evaluates to true if we're presenting an Intel CPU to the guest.
361 */
362#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
363
364/**
365 * Evaluates to true if we're presenting an AMD CPU to the guest.
366 */
367#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
368
369/**
370 * Check if the address is canonical.
371 */
372#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
373
374/**
375 * Gets the effective VEX.VVVV value.
376 *
377 * The 4th bit is ignored if not 64-bit code.
378 * @returns effective V-register value.
379 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
380 */
381#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
382 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
383
384/** @def IEM_USE_UNALIGNED_DATA_ACCESS
385 * Use unaligned accesses instead of elaborate byte assembly. */
386#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
387# define IEM_USE_UNALIGNED_DATA_ACCESS
388#endif
389
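/*
 * In practice this selects between the two fetch patterns sketched below
 * (pbBuf/offBuf are placeholders for an opcode or data buffer):
 *
 * #ifdef IEM_USE_UNALIGNED_DATA_ACCESS
 *     uint16_t const u16 = *(uint16_t const *)&pbBuf[offBuf];             // x86/AMD64 handle unaligned reads fine
 * #else
 *     uint16_t const u16 = RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]); // assemble byte by byte
 * #endif
 */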
390#ifdef VBOX_WITH_NESTED_HWVIRT
391/**
392 * Check the common SVM instruction preconditions.
393 */
394# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
395 do { \
396 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
397 { \
398 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
399 return iemRaiseUndefinedOpcode(pVCpu); \
400 } \
401 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
402 { \
403 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
404 return iemRaiseUndefinedOpcode(pVCpu); \
405 } \
406 if (pVCpu->iem.s.uCpl != 0) \
407 { \
408 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
409 return iemRaiseGeneralProtectionFault0(pVCpu); \
410 } \
411 } while (0)
412
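/*
 * Intended use (sketch): the first statement of an SVM instruction
 * implementation, e.g. for VMLOAD:
 *
 *     IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload);
 */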
413/**
414 * Check if SVM is enabled.
415 */
416# define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
417
418/**
419 * Check if an SVM control/instruction intercept is set.
420 */
421# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(IEM_GET_CTX(a_pVCpu), (a_Intercept)))
422
423/**
424 * Check if an SVM read CRx intercept is set.
425 */
426# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
427
428/**
429 * Check if an SVM write CRx intercept is set.
430 */
431# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
432
433/**
434 * Check if an SVM read DRx intercept is set.
435 */
436# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
437
438/**
439 * Check if an SVM write DRx intercept is set.
440 */
441# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
442
443/**
444 * Check if an SVM exception intercept is set.
445 */
446# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (CPUMIsGuestSvmXcptInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uVector)))
447
448/**
449 * Invokes the SVM \#VMEXIT handler for the nested-guest.
450 */
451# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
452 do \
453 { \
454 return iemSvmVmexit((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); \
455 } while (0)
456
457/**
458 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
459 * corresponding decode assist information.
460 */
461# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
462 do \
463 { \
464 uint64_t uExitInfo1; \
465 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssist \
466 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
467 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
468 else \
469 uExitInfo1 = 0; \
470 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
471 } while (0)
472
473#else
474# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) do { } while (0)
475# define IEM_IS_SVM_ENABLED(a_pVCpu) (false)
476# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
477# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
478# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
479# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
480# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
481# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
482# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
483# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
484
485#endif /* VBOX_WITH_NESTED_HWVIRT */
486
487
488/*********************************************************************************************************************************
489* Global Variables *
490*********************************************************************************************************************************/
491extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
492
493
494/** Function table for the ADD instruction. */
495IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
496{
497 iemAImpl_add_u8, iemAImpl_add_u8_locked,
498 iemAImpl_add_u16, iemAImpl_add_u16_locked,
499 iemAImpl_add_u32, iemAImpl_add_u32_locked,
500 iemAImpl_add_u64, iemAImpl_add_u64_locked
501};
502
503/** Function table for the ADC instruction. */
504IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
505{
506 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
507 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
508 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
509 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
510};
511
512/** Function table for the SUB instruction. */
513IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
514{
515 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
516 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
517 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
518 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
519};
520
521/** Function table for the SBB instruction. */
522IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
523{
524 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
525 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
526 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
527 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
528};
529
530/** Function table for the OR instruction. */
531IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
532{
533 iemAImpl_or_u8, iemAImpl_or_u8_locked,
534 iemAImpl_or_u16, iemAImpl_or_u16_locked,
535 iemAImpl_or_u32, iemAImpl_or_u32_locked,
536 iemAImpl_or_u64, iemAImpl_or_u64_locked
537};
538
539/** Function table for the XOR instruction. */
540IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
541{
542 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
543 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
544 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
545 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
546};
547
548/** Function table for the AND instruction. */
549IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
550{
551 iemAImpl_and_u8, iemAImpl_and_u8_locked,
552 iemAImpl_and_u16, iemAImpl_and_u16_locked,
553 iemAImpl_and_u32, iemAImpl_and_u32_locked,
554 iemAImpl_and_u64, iemAImpl_and_u64_locked
555};
556
557/** Function table for the CMP instruction.
558 * @remarks Making operand order ASSUMPTIONS.
559 */
560IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
561{
562 iemAImpl_cmp_u8, NULL,
563 iemAImpl_cmp_u16, NULL,
564 iemAImpl_cmp_u32, NULL,
565 iemAImpl_cmp_u64, NULL
566};
567
568/** Function table for the TEST instruction.
569 * @remarks Making operand order ASSUMPTIONS.
570 */
571IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
572{
573 iemAImpl_test_u8, NULL,
574 iemAImpl_test_u16, NULL,
575 iemAImpl_test_u32, NULL,
576 iemAImpl_test_u64, NULL
577};
578
579/** Function table for the BT instruction. */
580IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
581{
582 NULL, NULL,
583 iemAImpl_bt_u16, NULL,
584 iemAImpl_bt_u32, NULL,
585 iemAImpl_bt_u64, NULL
586};
587
588/** Function table for the BTC instruction. */
589IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
590{
591 NULL, NULL,
592 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
593 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
594 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
595};
596
597/** Function table for the BTR instruction. */
598IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
599{
600 NULL, NULL,
601 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
602 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
603 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
604};
605
606/** Function table for the BTS instruction. */
607IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
608{
609 NULL, NULL,
610 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
611 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
612 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
613};
614
615/** Function table for the BSF instruction. */
616IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
617{
618 NULL, NULL,
619 iemAImpl_bsf_u16, NULL,
620 iemAImpl_bsf_u32, NULL,
621 iemAImpl_bsf_u64, NULL
622};
623
624/** Function table for the BSR instruction. */
625IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
626{
627 NULL, NULL,
628 iemAImpl_bsr_u16, NULL,
629 iemAImpl_bsr_u32, NULL,
630 iemAImpl_bsr_u64, NULL
631};
632
633/** Function table for the two-operand IMUL instruction. */
634IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
635{
636 NULL, NULL,
637 iemAImpl_imul_two_u16, NULL,
638 iemAImpl_imul_two_u32, NULL,
639 iemAImpl_imul_two_u64, NULL
640};
641
642/** Group 1 /r lookup table. */
643IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
644{
645 &g_iemAImpl_add,
646 &g_iemAImpl_or,
647 &g_iemAImpl_adc,
648 &g_iemAImpl_sbb,
649 &g_iemAImpl_and,
650 &g_iemAImpl_sub,
651 &g_iemAImpl_xor,
652 &g_iemAImpl_cmp
653};
654
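/*
 * Decode sketch: for the 0x80..0x83 group-1 opcode forms the reg field of the
 * ModR/M byte selects the operation, so a decoder can pick the implementation
 * table like this (bRm being the ModR/M byte):
 *
 *     PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
 */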
655/** Function table for the INC instruction. */
656IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
657{
658 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
659 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
660 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
661 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
662};
663
664/** Function table for the DEC instruction. */
665IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
666{
667 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
668 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
669 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
670 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
671};
672
673/** Function table for the NEG instruction. */
674IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
675{
676 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
677 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
678 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
679 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
680};
681
682/** Function table for the NOT instruction. */
683IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
684{
685 iemAImpl_not_u8, iemAImpl_not_u8_locked,
686 iemAImpl_not_u16, iemAImpl_not_u16_locked,
687 iemAImpl_not_u32, iemAImpl_not_u32_locked,
688 iemAImpl_not_u64, iemAImpl_not_u64_locked
689};
690
691
692/** Function table for the ROL instruction. */
693IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
694{
695 iemAImpl_rol_u8,
696 iemAImpl_rol_u16,
697 iemAImpl_rol_u32,
698 iemAImpl_rol_u64
699};
700
701/** Function table for the ROR instruction. */
702IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
703{
704 iemAImpl_ror_u8,
705 iemAImpl_ror_u16,
706 iemAImpl_ror_u32,
707 iemAImpl_ror_u64
708};
709
710/** Function table for the RCL instruction. */
711IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
712{
713 iemAImpl_rcl_u8,
714 iemAImpl_rcl_u16,
715 iemAImpl_rcl_u32,
716 iemAImpl_rcl_u64
717};
718
719/** Function table for the RCR instruction. */
720IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
721{
722 iemAImpl_rcr_u8,
723 iemAImpl_rcr_u16,
724 iemAImpl_rcr_u32,
725 iemAImpl_rcr_u64
726};
727
728/** Function table for the SHL instruction. */
729IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
730{
731 iemAImpl_shl_u8,
732 iemAImpl_shl_u16,
733 iemAImpl_shl_u32,
734 iemAImpl_shl_u64
735};
736
737/** Function table for the SHR instruction. */
738IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
739{
740 iemAImpl_shr_u8,
741 iemAImpl_shr_u16,
742 iemAImpl_shr_u32,
743 iemAImpl_shr_u64
744};
745
746/** Function table for the SAR instruction. */
747IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
748{
749 iemAImpl_sar_u8,
750 iemAImpl_sar_u16,
751 iemAImpl_sar_u32,
752 iemAImpl_sar_u64
753};
754
755
756/** Function table for the MUL instruction. */
757IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
758{
759 iemAImpl_mul_u8,
760 iemAImpl_mul_u16,
761 iemAImpl_mul_u32,
762 iemAImpl_mul_u64
763};
764
765/** Function table for the IMUL instruction working implicitly on rAX. */
766IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
767{
768 iemAImpl_imul_u8,
769 iemAImpl_imul_u16,
770 iemAImpl_imul_u32,
771 iemAImpl_imul_u64
772};
773
774/** Function table for the DIV instruction. */
775IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
776{
777 iemAImpl_div_u8,
778 iemAImpl_div_u16,
779 iemAImpl_div_u32,
780 iemAImpl_div_u64
781};
782
783/** Function table for the IDIV instruction. */
784IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
785{
786 iemAImpl_idiv_u8,
787 iemAImpl_idiv_u16,
788 iemAImpl_idiv_u32,
789 iemAImpl_idiv_u64
790};
791
792/** Function table for the SHLD instruction */
793IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
794{
795 iemAImpl_shld_u16,
796 iemAImpl_shld_u32,
797 iemAImpl_shld_u64,
798};
799
800/** Function table for the SHRD instruction */
801IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
802{
803 iemAImpl_shrd_u16,
804 iemAImpl_shrd_u32,
805 iemAImpl_shrd_u64,
806};
807
808
809/** Function table for the PUNPCKLBW instruction */
810IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
811/** Function table for the PUNPCKLWD instruction */
812IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
813/** Function table for the PUNPCKLDQ instruction */
814IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
815/** Function table for the PUNPCKLQDQ instruction */
816IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
817
818/** Function table for the PUNPCKHBW instruction */
819IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
820/** Function table for the PUNPCKHWD instruction */
821IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
822/** Function table for the PUNPCKHDQ instruction */
823IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
824/** Function table for the PUNPCKHQDQ instruction */
825IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
826
827/** Function table for the PXOR instruction */
828IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
829/** Function table for the PCMPEQB instruction */
830IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
831/** Function table for the PCMPEQW instruction */
832IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
833/** Function table for the PCMPEQD instruction */
834IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
835
836
837#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
838/** What IEM just wrote. */
839uint8_t g_abIemWrote[256];
840/** How much IEM just wrote. */
841size_t g_cbIemWrote;
842#endif
843
844
845/*********************************************************************************************************************************
846* Internal Functions *
847*********************************************************************************************************************************/
848IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
849IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
850IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
851IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
852/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
853IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
854IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
855IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
856IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
857IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
858IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
859IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
860IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
861IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
862IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
863IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
864IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
865#ifdef IEM_WITH_SETJMP
866DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
867DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
868DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
869DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
870DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
871#endif
872
873IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
874IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
875IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
876IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
877IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
878IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
879IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
880IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
881IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
882IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
883IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
884IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
885IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
886IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
887IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
888IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
889
890#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
891IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
892#endif
893IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
894IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
895
896#ifdef VBOX_WITH_NESTED_HWVIRT
897IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uExitCode, uint64_t uExitInfo1,
898 uint64_t uExitInfo2);
899IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t u8Vector, uint32_t fFlags,
900 uint32_t uErr, uint64_t uCr2);
901#endif
902
903/**
904 * Sets the pass up status.
905 *
906 * @returns VINF_SUCCESS.
907 * @param pVCpu The cross context virtual CPU structure of the
908 * calling thread.
909 * @param rcPassUp The pass up status. Must be informational.
910 * VINF_SUCCESS is not allowed.
911 */
912IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
913{
914 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
915
916 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
917 if (rcOldPassUp == VINF_SUCCESS)
918 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
919 /* If both are EM scheduling codes, use EM priority rules. */
920 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
921 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
922 {
923 if (rcPassUp < rcOldPassUp)
924 {
925 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
926 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
927 }
928 else
929 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
930 }
931 /* Override EM scheduling with specific status code. */
932 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
933 {
934 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
935 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
936 }
937 /* Don't override specific status code, first come first served. */
938 else
939 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
940 return VINF_SUCCESS;
941}
942
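/*
 * Typical caller pattern (sketch, mirroring the opcode prefetch code further
 * down): turn an informational read status into VINF_SUCCESS for the rest of
 * the instruction while remembering it in rcPassUp for the outer loop:
 *
 *     if (rcStrict != VINF_SUCCESS && PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *         rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
 */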
943
944/**
945 * Calculates the CPU mode.
946 *
947 * This is mainly for updating IEMCPU::enmCpuMode.
948 *
949 * @returns CPU mode.
950 * @param pCtx The register context for the CPU.
951 */
952DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
953{
954 if (CPUMIsGuestIn64BitCodeEx(pCtx))
955 return IEMMODE_64BIT;
956 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
957 return IEMMODE_32BIT;
958 return IEMMODE_16BIT;
959}
960
961
962/**
963 * Initializes the execution state.
964 *
965 * @param pVCpu The cross context virtual CPU structure of the
966 * calling thread.
967 * @param fBypassHandlers Whether to bypass access handlers.
968 *
969 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
970 * side-effects in strict builds.
971 */
972DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
973{
974 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
975
976 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
977
978#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
979 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
980 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
981 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
982 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
983 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
984 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
985 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
986 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
987#endif
988
989#ifdef VBOX_WITH_RAW_MODE_NOT_R0
990 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
991#endif
992 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
993 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
994#ifdef VBOX_STRICT
995 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
996 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
997 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
998 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
999 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1000 pVCpu->iem.s.uRexReg = 127;
1001 pVCpu->iem.s.uRexB = 127;
1002 pVCpu->iem.s.uRexIndex = 127;
1003 pVCpu->iem.s.iEffSeg = 127;
1004 pVCpu->iem.s.idxPrefix = 127;
1005 pVCpu->iem.s.uVex3rdReg = 127;
1006 pVCpu->iem.s.uVexLength = 127;
1007 pVCpu->iem.s.fEvexStuff = 127;
1008 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1009# ifdef IEM_WITH_CODE_TLB
1010 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1011 pVCpu->iem.s.pbInstrBuf = NULL;
1012 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1013 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1014 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1015 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1016# else
1017 pVCpu->iem.s.offOpcode = 127;
1018 pVCpu->iem.s.cbOpcode = 127;
1019# endif
1020#endif
1021
1022 pVCpu->iem.s.cActiveMappings = 0;
1023 pVCpu->iem.s.iNextMapping = 0;
1024 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1025 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1026#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1027 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1028 && pCtx->cs.u64Base == 0
1029 && pCtx->cs.u32Limit == UINT32_MAX
1030 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1031 if (!pVCpu->iem.s.fInPatchCode)
1032 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1033#endif
1034
1035#ifdef IEM_VERIFICATION_MODE_FULL
1036 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
1037 pVCpu->iem.s.fNoRem = true;
1038#endif
1039}
1040
1041#ifdef VBOX_WITH_NESTED_HWVIRT
1042/**
1043 * Performs a minimal reinitialization of the execution state.
1044 *
1045 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1046 * 'world-switch' type operations on the CPU. Currently only nested
1047 * hardware-virtualization uses it.
1048 *
1049 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1050 */
1051IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1052{
1053 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1054 IEMMODE const enmMode = iemCalcCpuMode(pCtx);
1055 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1056
1057 pVCpu->iem.s.uCpl = uCpl;
1058 pVCpu->iem.s.enmCpuMode = enmMode;
1059 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1060 pVCpu->iem.s.enmEffAddrMode = enmMode;
1061 if (enmMode != IEMMODE_64BIT)
1062 {
1063 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1064 pVCpu->iem.s.enmEffOpSize = enmMode;
1065 }
1066 else
1067 {
1068 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1069 pVCpu->iem.s.enmEffOpSize = enmMode;
1070 }
1071 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1072#ifndef IEM_WITH_CODE_TLB
1073 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1074 pVCpu->iem.s.offOpcode = 0;
1075 pVCpu->iem.s.cbOpcode = 0;
1076#endif
1077}
1078#endif
1079
1080/**
1081 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1082 *
1083 * @param pVCpu The cross context virtual CPU structure of the
1084 * calling thread.
1085 */
1086DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1087{
1088 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1089#ifdef IEM_VERIFICATION_MODE_FULL
1090 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
1091#endif
1092#ifdef VBOX_STRICT
1093# ifdef IEM_WITH_CODE_TLB
1094 NOREF(pVCpu);
1095# else
1096 pVCpu->iem.s.cbOpcode = 0;
1097# endif
1098#else
1099 NOREF(pVCpu);
1100#endif
1101}
1102
1103
1104/**
1105 * Initializes the decoder state.
1106 *
1107 * iemReInitDecoder is mostly a copy of this function.
1108 *
1109 * @param pVCpu The cross context virtual CPU structure of the
1110 * calling thread.
1111 * @param fBypassHandlers Whether to bypass access handlers.
1112 */
1113DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1114{
1115 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1116
1117 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1118
1119#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1120 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1121 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1122 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1123 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1124 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1125 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1126 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1127 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1128#endif
1129
1130#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1131 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1132#endif
1133 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1134#ifdef IEM_VERIFICATION_MODE_FULL
1135 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1136 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1137#endif
1138 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1139 pVCpu->iem.s.enmCpuMode = enmMode;
1140 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1141 pVCpu->iem.s.enmEffAddrMode = enmMode;
1142 if (enmMode != IEMMODE_64BIT)
1143 {
1144 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1145 pVCpu->iem.s.enmEffOpSize = enmMode;
1146 }
1147 else
1148 {
1149 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1150 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1151 }
1152 pVCpu->iem.s.fPrefixes = 0;
1153 pVCpu->iem.s.uRexReg = 0;
1154 pVCpu->iem.s.uRexB = 0;
1155 pVCpu->iem.s.uRexIndex = 0;
1156 pVCpu->iem.s.idxPrefix = 0;
1157 pVCpu->iem.s.uVex3rdReg = 0;
1158 pVCpu->iem.s.uVexLength = 0;
1159 pVCpu->iem.s.fEvexStuff = 0;
1160 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1161#ifdef IEM_WITH_CODE_TLB
1162 pVCpu->iem.s.pbInstrBuf = NULL;
1163 pVCpu->iem.s.offInstrNextByte = 0;
1164 pVCpu->iem.s.offCurInstrStart = 0;
1165# ifdef VBOX_STRICT
1166 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1167 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1168 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1169# endif
1170#else
1171 pVCpu->iem.s.offOpcode = 0;
1172 pVCpu->iem.s.cbOpcode = 0;
1173#endif
1174 pVCpu->iem.s.cActiveMappings = 0;
1175 pVCpu->iem.s.iNextMapping = 0;
1176 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1177 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1178#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1179 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1180 && pCtx->cs.u64Base == 0
1181 && pCtx->cs.u32Limit == UINT32_MAX
1182 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1183 if (!pVCpu->iem.s.fInPatchCode)
1184 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1185#endif
1186
1187#ifdef DBGFTRACE_ENABLED
1188 switch (enmMode)
1189 {
1190 case IEMMODE_64BIT:
1191 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1192 break;
1193 case IEMMODE_32BIT:
1194 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1195 break;
1196 case IEMMODE_16BIT:
1197 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1198 break;
1199 }
1200#endif
1201}
1202
1203
1204/**
1205 * Reinitializes the decoder state for the 2nd+ loop iterations of IEMExecLots.
1206 *
1207 * This is mostly a copy of iemInitDecoder.
1208 *
1209 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1210 */
1211DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1212{
1213 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1214
1215 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1216
1217#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1218 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1219 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1220 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1221 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1222 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1223 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1224 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1225 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1226#endif
1227
1228 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1229#ifdef IEM_VERIFICATION_MODE_FULL
1230 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1231 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1232#endif
1233 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1234 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1235 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1236 pVCpu->iem.s.enmEffAddrMode = enmMode;
1237 if (enmMode != IEMMODE_64BIT)
1238 {
1239 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1240 pVCpu->iem.s.enmEffOpSize = enmMode;
1241 }
1242 else
1243 {
1244 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1245 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1246 }
1247 pVCpu->iem.s.fPrefixes = 0;
1248 pVCpu->iem.s.uRexReg = 0;
1249 pVCpu->iem.s.uRexB = 0;
1250 pVCpu->iem.s.uRexIndex = 0;
1251 pVCpu->iem.s.idxPrefix = 0;
1252 pVCpu->iem.s.uVex3rdReg = 0;
1253 pVCpu->iem.s.uVexLength = 0;
1254 pVCpu->iem.s.fEvexStuff = 0;
1255 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1256#ifdef IEM_WITH_CODE_TLB
1257 if (pVCpu->iem.s.pbInstrBuf)
1258 {
1259 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1260 - pVCpu->iem.s.uInstrBufPc;
1261 if (off < pVCpu->iem.s.cbInstrBufTotal)
1262 {
1263 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1264 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1265 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1266 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1267 else
1268 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1269 }
1270 else
1271 {
1272 pVCpu->iem.s.pbInstrBuf = NULL;
1273 pVCpu->iem.s.offInstrNextByte = 0;
1274 pVCpu->iem.s.offCurInstrStart = 0;
1275 pVCpu->iem.s.cbInstrBuf = 0;
1276 pVCpu->iem.s.cbInstrBufTotal = 0;
1277 }
1278 }
1279 else
1280 {
1281 pVCpu->iem.s.offInstrNextByte = 0;
1282 pVCpu->iem.s.offCurInstrStart = 0;
1283 pVCpu->iem.s.cbInstrBuf = 0;
1284 pVCpu->iem.s.cbInstrBufTotal = 0;
1285 }
1286#else
1287 pVCpu->iem.s.cbOpcode = 0;
1288 pVCpu->iem.s.offOpcode = 0;
1289#endif
1290 Assert(pVCpu->iem.s.cActiveMappings == 0);
1291 pVCpu->iem.s.iNextMapping = 0;
1292 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1293 Assert(pVCpu->iem.s.fBypassHandlers == false);
1294#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1295 if (!pVCpu->iem.s.fInPatchCode)
1296 { /* likely */ }
1297 else
1298 {
1299 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1300 && pCtx->cs.u64Base == 0
1301 && pCtx->cs.u32Limit == UINT32_MAX
1302 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1303 if (!pVCpu->iem.s.fInPatchCode)
1304 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1305 }
1306#endif
1307
1308#ifdef DBGFTRACE_ENABLED
1309 switch (enmMode)
1310 {
1311 case IEMMODE_64BIT:
1312 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1313 break;
1314 case IEMMODE_32BIT:
1315 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1316 break;
1317 case IEMMODE_16BIT:
1318 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1319 break;
1320 }
1321#endif
1322}
1323
1324
1325
1326/**
1327 * Prefetches opcodes the first time, when starting execution.
1328 *
1329 * @returns Strict VBox status code.
1330 * @param pVCpu The cross context virtual CPU structure of the
1331 * calling thread.
1332 * @param fBypassHandlers Whether to bypass access handlers.
1333 */
1334IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1335{
1336#ifdef IEM_VERIFICATION_MODE_FULL
1337 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1338#endif
1339 iemInitDecoder(pVCpu, fBypassHandlers);
1340
1341#ifdef IEM_WITH_CODE_TLB
1342 /** @todo Do ITLB lookup here. */
1343
1344#else /* !IEM_WITH_CODE_TLB */
1345
1346 /*
1347 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1348 *
1349 * First translate CS:rIP to a physical address.
1350 */
1351 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1352 uint32_t cbToTryRead;
1353 RTGCPTR GCPtrPC;
1354 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1355 {
1356 cbToTryRead = PAGE_SIZE;
1357 GCPtrPC = pCtx->rip;
1358 if (IEM_IS_CANONICAL(GCPtrPC))
1359 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1360 else
1361 return iemRaiseGeneralProtectionFault0(pVCpu);
1362 }
1363 else
1364 {
1365 uint32_t GCPtrPC32 = pCtx->eip;
1366 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1367 if (GCPtrPC32 <= pCtx->cs.u32Limit)
1368 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1369 else
1370 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1371 if (cbToTryRead) { /* likely */ }
1372 else /* overflowed */
1373 {
1374 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1375 cbToTryRead = UINT32_MAX;
1376 }
1377 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1378 Assert(GCPtrPC <= UINT32_MAX);
1379 }
1380
1381# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1382 /* Allow interpretation of patch manager code blocks since they can for
1383 instance throw #PFs for perfectly good reasons. */
1384 if (pVCpu->iem.s.fInPatchCode)
1385 {
1386 size_t cbRead = 0;
1387 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1388 AssertRCReturn(rc, rc);
1389 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1390 return VINF_SUCCESS;
1391 }
1392# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1393
1394 RTGCPHYS GCPhys;
1395 uint64_t fFlags;
1396 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1397 if (RT_SUCCESS(rc)) { /* probable */ }
1398 else
1399 {
1400 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1401 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1402 }
1403 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1404 else
1405 {
1406 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1407 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1408 }
1409 if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1410 else
1411 {
1412 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1413 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1414 }
1415 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1416 /** @todo Check reserved bits and such stuff. PGM is better at doing
1417 * that, so do it when implementing the guest virtual address
1418 * TLB... */
1419
1420# ifdef IEM_VERIFICATION_MODE_FULL
1421 /*
1422 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1423 * instruction.
1424 */
1425 /** @todo optimize this differently by not using PGMPhysRead. */
1426 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1427 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1428 if ( offPrevOpcodes < cbOldOpcodes
1429 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1430 {
1431 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1432 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1433 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1434 pVCpu->iem.s.cbOpcode = cbNew;
1435 return VINF_SUCCESS;
1436 }
1437# endif
1438
1439 /*
1440 * Read the bytes at this address.
1441 */
1442 PVM pVM = pVCpu->CTX_SUFF(pVM);
1443# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1444 size_t cbActual;
1445 if ( PATMIsEnabled(pVM)
1446 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1447 {
1448 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1449 Assert(cbActual > 0);
1450 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1451 }
1452 else
1453# endif
1454 {
1455 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1456 if (cbToTryRead > cbLeftOnPage)
1457 cbToTryRead = cbLeftOnPage;
1458 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1459 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1460
1461 if (!pVCpu->iem.s.fBypassHandlers)
1462 {
1463 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1464 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1465 { /* likely */ }
1466 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1467 {
1468 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1469 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1470 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1471 }
1472 else
1473 {
1474 Log((RT_SUCCESS(rcStrict)
1475 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1476 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1477 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1478 return rcStrict;
1479 }
1480 }
1481 else
1482 {
1483 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1484 if (RT_SUCCESS(rc))
1485 { /* likely */ }
1486 else
1487 {
1488 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1489 GCPtrPC, GCPhys, cbToTryRead, rc));
1490 return rc;
1491 }
1492 }
1493 pVCpu->iem.s.cbOpcode = cbToTryRead;
1494 }
1495#endif /* !IEM_WITH_CODE_TLB */
1496 return VINF_SUCCESS;
1497}
1498
1499
1500/**
1501 * Invalidates the IEM TLBs.
1502 *
1503 * This is called internally as well as by PGM when moving GC mappings.
1504 *
1506 * @param pVCpu The cross context virtual CPU structure of the calling
1507 * thread.
1508 * @param fVmm Set when PGM calls us with a remapping.
1509 */
1510VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1511{
1512#ifdef IEM_WITH_CODE_TLB
1513 pVCpu->iem.s.cbInstrBufTotal = 0;
1514 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1515 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1516 { /* very likely */ }
1517 else
1518 {
1519 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1520 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1521 while (i-- > 0)
1522 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1523 }
1524#endif
1525
1526#ifdef IEM_WITH_DATA_TLB
1527 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1528 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1529 { /* very likely */ }
1530 else
1531 {
1532 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1533 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1534 while (i-- > 0)
1535 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1536 }
1537#endif
1538 NOREF(pVCpu); NOREF(fVmm);
1539}
1540
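/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * bumping uTlbRevision above invalidates all 256 entries lazily, because a
 * lookup only hits when the stored tag, which has the current revision OR'ed
 * into its upper bits, matches exactly.  A minimal lookup under that scheme,
 * using the same fields as the code above (the helper name is made up):
 */
#if 0 /* sketch only */
DECLINLINE(bool) iemTlbSketchIsCodeTlbHit(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    uint64_t const uTag = (GCPtr >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
    return pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag].uTag == uTag;
}
#endif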
1541
1542/**
1543 * Invalidates a page in the TLBs.
1544 *
1545 * @param pVCpu The cross context virtual CPU structure of the calling
1546 * thread.
1547 * @param GCPtr The address of the page to invalidate.
1548 */
1549VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1550{
1551#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1552 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1553 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1554 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1555 uintptr_t idx = (uint8_t)GCPtr;
1556
1557# ifdef IEM_WITH_CODE_TLB
1558 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1559 {
1560 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1561 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1562 pVCpu->iem.s.cbInstrBufTotal = 0;
1563 }
1564# endif
1565
1566# ifdef IEM_WITH_DATA_TLB
1567 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1568 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1569# endif
1570#else
1571 NOREF(pVCpu); NOREF(GCPtr);
1572#endif
1573}
1574
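/*
 * Illustrative sketch (editorial addition): the TLBs are direct mapped on
 * bits 12..19 of the linear address, so only the one slot that could hold
 * the page has to be checked.  A hypothetical caller reacting to a guest
 * INVLPG would simply pass the linear address straight through (GCPtrInvlPg
 * is a made-up variable name):
 */
#if 0 /* sketch only */
    IEMTlbInvalidatePage(pVCpu, GCPtrInvlPg);
#endif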
1575
1576/**
1577 * Invalidates the host physical aspects of the IEM TLBs.
1578 *
1579 * This is called internally as well as by PGM when moving GC mappings.
1580 *
1581 * @param pVCpu The cross context virtual CPU structure of the calling
1582 * thread.
1583 */
1584VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1585{
1586#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1587 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1588
1589# ifdef IEM_WITH_CODE_TLB
1590 pVCpu->iem.s.cbInstrBufTotal = 0;
1591# endif
1592 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1593 if (uTlbPhysRev != 0)
1594 {
1595 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1596 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1597 }
1598 else
1599 {
1600 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1601 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1602
1603 unsigned i;
1604# ifdef IEM_WITH_CODE_TLB
1605 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1606 while (i-- > 0)
1607 {
1608 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1609 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1610 }
1611# endif
1612# ifdef IEM_WITH_DATA_TLB
1613 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1614 while (i-- > 0)
1615 {
1616 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1617 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1618 }
1619# endif
1620 }
1621#else
1622 NOREF(pVCpu);
1623#endif
1624}
1625
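/*
 * Illustrative sketch (editorial addition): the physical revision is kept in
 * the IEMTLBE_F_PHYS_REV bits of each entry's fFlagsAndPhysRev, so after the
 * bump above an entry's physical info (GCPhys mapping, pbMappingR3) counts as
 * stale as soon as those bits no longer equal uTlbPhysRev.  This is the same
 * test the code fetcher below performs before trusting pbMappingR3:
 */
#if 0 /* sketch only, as it would appear inside a function with pTlbe in scope */
    bool const fPhysInfoValid = (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV)
                             == pVCpu->iem.s.CodeTlb.uTlbPhysRev;
#endif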
1626
1627/**
1628 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
1629 *
1630 * This is called internally as well as by PGM when moving GC mappings.
1631 *
1632 * @param pVM The cross context VM structure.
1633 *
1634 * @remarks Caller holds the PGM lock.
1635 */
1636VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1637{
1638 RT_NOREF_PV(pVM);
1639}
1640
1641#ifdef IEM_WITH_CODE_TLB
1642
1643/**
1644 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception
1645 * and longjmp'ing on failure.
1646 *
1647 * We end up here for a number of reasons:
1648 * - pbInstrBuf isn't yet initialized.
1649 * - Advancing beyond the buffer boundary (e.g. cross page).
1650 * - Advancing beyond the CS segment limit.
1651 * - Fetching from non-mappable page (e.g. MMIO).
1652 *
1653 * @param pVCpu The cross context virtual CPU structure of the
1654 * calling thread.
1655 * @param cbDst Number of bytes to read.
1656 * @param pvDst Where to return the bytes.
1657 *
1658 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1659 */
1660IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1661{
1662#ifdef IN_RING3
1663//__debugbreak();
1664 for (;;)
1665 {
1666 Assert(cbDst <= 8);
1667 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1668
1669 /*
1670 * We might have a partial buffer match, deal with that first to make the
1671 * rest simpler. This is the first part of the cross page/buffer case.
1672 */
1673 if (pVCpu->iem.s.pbInstrBuf != NULL)
1674 {
1675 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1676 {
1677 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1678 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1679 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1680
1681 cbDst -= cbCopy;
1682 pvDst = (uint8_t *)pvDst + cbCopy;
1683 offBuf += cbCopy;
1684 pVCpu->iem.s.offInstrNextByte += offBuf;
1685 }
1686 }
1687
1688 /*
1689 * Check segment limit, figuring how much we're allowed to access at this point.
1690 *
1691 * We will fault immediately if RIP is past the segment limit / in non-canonical
1692 * territory. If we do continue, there are one or more bytes to read before we
1693 * end up in trouble and we need to do that first before faulting.
1694 */
1695 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1696 RTGCPTR GCPtrFirst;
1697 uint32_t cbMaxRead;
1698 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1699 {
1700 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1701 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1702 { /* likely */ }
1703 else
1704 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1705 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1706 }
1707 else
1708 {
1709 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1710 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1711 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1712 { /* likely */ }
1713 else
1714 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1715 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1716 if (cbMaxRead != 0)
1717 { /* likely */ }
1718 else
1719 {
1720 /* Overflowed because address is 0 and limit is max. */
1721 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1722 cbMaxRead = X86_PAGE_SIZE;
1723 }
1724 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1725 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1726 if (cbMaxRead2 < cbMaxRead)
1727 cbMaxRead = cbMaxRead2;
1728 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1729 }
1730
1731 /*
1732 * Get the TLB entry for this piece of code.
1733 */
1734 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1735 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1736 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1737 if (pTlbe->uTag == uTag)
1738 {
1739 /* likely when executing lots of code, otherwise unlikely */
1740# ifdef VBOX_WITH_STATISTICS
1741 pVCpu->iem.s.CodeTlb.cTlbHits++;
1742# endif
1743 }
1744 else
1745 {
1746 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1747# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1748 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1749 {
1750 pTlbe->uTag = uTag;
1751 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1752 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1753 pTlbe->GCPhys = NIL_RTGCPHYS;
1754 pTlbe->pbMappingR3 = NULL;
1755 }
1756 else
1757# endif
1758 {
1759 RTGCPHYS GCPhys;
1760 uint64_t fFlags;
1761 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1762 if (RT_FAILURE(rc))
1763 {
1764 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1765 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1766 }
1767
1768 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1769 pTlbe->uTag = uTag;
1770 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1771 pTlbe->GCPhys = GCPhys;
1772 pTlbe->pbMappingR3 = NULL;
1773 }
1774 }
1775
1776 /*
1777 * Check TLB page table level access flags.
1778 */
1779 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1780 {
1781 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1782 {
1783 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1784 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1785 }
1786 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1787 {
1788 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1789 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1790 }
1791 }
1792
1793# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1794 /*
1795 * Allow interpretation of patch manager code blocks since they can for
1796 * instance throw #PFs for perfectly good reasons.
1797 */
1798 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1799 { /* not unlikely */ }
1800 else
1801 {
1802 /** @todo This could be optimized a little in ring-3 if we liked. */
1803 size_t cbRead = 0;
1804 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1805 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1806 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1807 return;
1808 }
1809# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1810
1811 /*
1812 * Look up the physical page info if necessary.
1813 */
1814 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1815 { /* not necessary */ }
1816 else
1817 {
1818 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1819 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1820 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1821 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1822 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1823 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1824 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1825 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1826 }
1827
1828# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1829 /*
1830 * Try do a direct read using the pbMappingR3 pointer.
1831 */
1832 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1833 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1834 {
1835 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1836 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1837 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1838 {
1839 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1840 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1841 }
1842 else
1843 {
1844 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1845 Assert(cbInstr < cbMaxRead);
1846 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1847 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1848 }
1849 if (cbDst <= cbMaxRead)
1850 {
1851 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1852 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1853 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1854 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1855 return;
1856 }
1857 pVCpu->iem.s.pbInstrBuf = NULL;
1858
1859 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1860 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1861 }
1862 else
1863# endif
1864#if 0
1865 /*
1866 * If there is no special read handling, we can read a bit more and
1867 * put it in the prefetch buffer.
1868 */
1869 if ( cbDst < cbMaxRead
1870 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1871 {
1872 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1873 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1874 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1875 { /* likely */ }
1876 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1877 {
1878 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1879 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1880 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1881 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1882 }
1883 else
1884 {
1885 Log((RT_SUCCESS(rcStrict)
1886 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1887 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1888 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1889 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1890 }
1891 }
1892 /*
1893 * Special read handling, so only read exactly what's needed.
1894 * This is a highly unlikely scenario.
1895 */
1896 else
1897#endif
1898 {
1899 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1900 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1901 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1902 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1903 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1904 { /* likely */ }
1905 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1906 {
1907 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1908 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1909 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1910 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1911 }
1912 else
1913 {
1914 Log((RT_SUCCESS(rcStrict)
1915 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1916 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1917 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1918 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1919 }
1920 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1921 if (cbToRead == cbDst)
1922 return;
1923 }
1924
1925 /*
1926 * More to read, loop.
1927 */
1928 cbDst -= cbMaxRead;
1929 pvDst = (uint8_t *)pvDst + cbMaxRead;
1930 }
1931#else
1932 RT_NOREF(pvDst, cbDst);
1933 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1934#endif
1935}
1936
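/*
 * Illustrative sketch (editorial addition): the fetcher above copies at most
 * one page's worth (cbMaxRead) per loop iteration and then goes around again
 * for the remainder, so a fetch straddling a page boundary simply takes two
 * passes.  Stripped of the translation and access checks, the loop structure
 * amounts to:
 */
#if 0 /* sketch only */
    while (cbDst > 0)
    {
        uint32_t const cbChunk = RT_MIN((uint32_t)cbDst, cbMaxRead); /* what's left on this page */
        /* ... translate GCPtrFirst, check access, copy cbChunk bytes ... */
        pvDst  = (uint8_t *)pvDst + cbChunk;
        cbDst -= cbChunk;
    }
#endif
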
1937#else
1938
1939/**
1940 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1941 * exception if it fails.
1942 *
1943 * @returns Strict VBox status code.
1944 * @param pVCpu The cross context virtual CPU structure of the
1945 * calling thread.
1946 * @param cbMin The minimum number of bytes relative to offOpcode
1947 * that must be read.
1948 */
1949IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1950{
1951 /*
1952 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1953 *
1954 * First translate CS:rIP to a physical address.
1955 */
1956 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1957 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1958 uint32_t cbToTryRead;
1959 RTGCPTR GCPtrNext;
1960 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1961 {
1962 cbToTryRead = PAGE_SIZE;
1963 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1964 if (!IEM_IS_CANONICAL(GCPtrNext))
1965 return iemRaiseGeneralProtectionFault0(pVCpu);
1966 }
1967 else
1968 {
1969 uint32_t GCPtrNext32 = pCtx->eip;
1970 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1971 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1972 if (GCPtrNext32 > pCtx->cs.u32Limit)
1973 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1974 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1975 if (!cbToTryRead) /* overflowed */
1976 {
1977 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1978 cbToTryRead = UINT32_MAX;
1979 /** @todo check out wrapping around the code segment. */
1980 }
1981 if (cbToTryRead < cbMin - cbLeft)
1982 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1983 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1984 }
1985
1986 /* Only read up to the end of the page, and make sure we don't read more
1987 than the opcode buffer can hold. */
1988 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1989 if (cbToTryRead > cbLeftOnPage)
1990 cbToTryRead = cbLeftOnPage;
1991 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1992 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1993/** @todo r=bird: Convert assertion into undefined opcode exception? */
1994 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1995
1996# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1997 /* Allow interpretation of patch manager code blocks since they can for
1998 instance throw #PFs for perfectly good reasons. */
1999 if (pVCpu->iem.s.fInPatchCode)
2000 {
2001 size_t cbRead = 0;
2002 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2003 AssertRCReturn(rc, rc);
2004 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2005 return VINF_SUCCESS;
2006 }
2007# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2008
2009 RTGCPHYS GCPhys;
2010 uint64_t fFlags;
2011 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2012 if (RT_FAILURE(rc))
2013 {
2014 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2015 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2016 }
2017 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2018 {
2019 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2020 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2021 }
2022 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
2023 {
2024 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2025 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2026 }
2027 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2028 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2029 /** @todo Check reserved bits and such stuff. PGM is better at doing
2030 * that, so do it when implementing the guest virtual address
2031 * TLB... */
2032
2033 /*
2034 * Read the bytes at this address.
2035 *
2036 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2037 * and since PATM should only patch the start of an instruction there
2038 * should be no need to check again here.
2039 */
2040 if (!pVCpu->iem.s.fBypassHandlers)
2041 {
2042 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2043 cbToTryRead, PGMACCESSORIGIN_IEM);
2044 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2045 { /* likely */ }
2046 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2047 {
2048 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2049 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2050 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2051 }
2052 else
2053 {
2054 Log((RT_SUCCESS(rcStrict)
2055 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2056 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2057 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2058 return rcStrict;
2059 }
2060 }
2061 else
2062 {
2063 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2064 if (RT_SUCCESS(rc))
2065 { /* likely */ }
2066 else
2067 {
2068 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2069 return rc;
2070 }
2071 }
2072 pVCpu->iem.s.cbOpcode += cbToTryRead;
2073 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2074
2075 return VINF_SUCCESS;
2076}
2077
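/*
 * Illustrative sketch (editorial addition): in this non-TLB build the
 * prefetched bytes accumulate in abOpcode[]; cbOpcode counts how many are
 * valid and offOpcode how many the decoder has consumed.  The slow-path
 * getters therefore call the function above exactly when the unconsumed
 * remainder is too small (cbNeeded is a made-up name):
 */
#if 0 /* sketch only */
    if ((size_t)(pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode) < cbNeeded)
        rcStrict = iemOpcodeFetchMoreBytes(pVCpu, cbNeeded);
#endif
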
2078#endif /* !IEM_WITH_CODE_TLB */
2079#ifndef IEM_WITH_SETJMP
2080
2081/**
2082 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2083 *
2084 * @returns Strict VBox status code.
2085 * @param pVCpu The cross context virtual CPU structure of the
2086 * calling thread.
2087 * @param pb Where to return the opcode byte.
2088 */
2089DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2090{
2091 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2092 if (rcStrict == VINF_SUCCESS)
2093 {
2094 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2095 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2096 pVCpu->iem.s.offOpcode = offOpcode + 1;
2097 }
2098 else
2099 *pb = 0;
2100 return rcStrict;
2101}
2102
2103
2104/**
2105 * Fetches the next opcode byte.
2106 *
2107 * @returns Strict VBox status code.
2108 * @param pVCpu The cross context virtual CPU structure of the
2109 * calling thread.
2110 * @param pu8 Where to return the opcode byte.
2111 */
2112DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2113{
2114 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2115 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2116 {
2117 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2118 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2119 return VINF_SUCCESS;
2120 }
2121 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2122}
2123
2124#else /* IEM_WITH_SETJMP */
2125
2126/**
2127 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2128 *
2129 * @returns The opcode byte.
2130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2131 */
2132DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2133{
2134# ifdef IEM_WITH_CODE_TLB
2135 uint8_t u8;
2136 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2137 return u8;
2138# else
2139 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2140 if (rcStrict == VINF_SUCCESS)
2141 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2142 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2143# endif
2144}
2145
2146
2147/**
2148 * Fetches the next opcode byte, longjmp on error.
2149 *
2150 * @returns The opcode byte.
2151 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2152 */
2153DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2154{
2155# ifdef IEM_WITH_CODE_TLB
2156 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2157 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2158 if (RT_LIKELY( pbBuf != NULL
2159 && offBuf < pVCpu->iem.s.cbInstrBuf))
2160 {
2161 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2162 return pbBuf[offBuf];
2163 }
2164# else
2165 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2166 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2167 {
2168 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2169 return pVCpu->iem.s.abOpcode[offOpcode];
2170 }
2171# endif
2172 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2173}
2174
2175#endif /* IEM_WITH_SETJMP */
2176
2177/**
2178 * Fetches the next opcode byte, returns automatically on failure.
2179 *
2180 * @param a_pu8 Where to return the opcode byte.
2181 * @remark Implicitly references pVCpu.
2182 */
2183#ifndef IEM_WITH_SETJMP
2184# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2185 do \
2186 { \
2187 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2188 if (rcStrict2 == VINF_SUCCESS) \
2189 { /* likely */ } \
2190 else \
2191 return rcStrict2; \
2192 } while (0)
2193#else
2194# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2195#endif /* IEM_WITH_SETJMP */
2196
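/*
 * Illustrative sketch (editorial addition): IEM_OPCODE_GET_NEXT_U8 must be a
 * macro because in the status-code build it performs a 'return' on behalf of
 * its caller, while in the setjmp build it is a plain assignment.  A
 * hypothetical decoder helper (the function name is made up) uses it like
 * this and lets the macro bail out for it on fetch failure:
 */
#if 0 /* sketch only */
IEM_STATIC VBOXSTRICTRC iemSketchFetchModRm(PVMCPU pVCpu, uint8_t *pbRm)
{
    IEM_OPCODE_GET_NEXT_U8(pbRm);   /* returns / longjmps on failure */
    return VINF_SUCCESS;
}
#endif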
2197
2198#ifndef IEM_WITH_SETJMP
2199/**
2200 * Fetches the next signed byte from the opcode stream.
2201 *
2202 * @returns Strict VBox status code.
2203 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2204 * @param pi8 Where to return the signed byte.
2205 */
2206DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2207{
2208 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2209}
2210#endif /* !IEM_WITH_SETJMP */
2211
2212
2213/**
2214 * Fetches the next signed byte from the opcode stream, returning automatically
2215 * on failure.
2216 *
2217 * @param a_pi8 Where to return the signed byte.
2218 * @remark Implicitly references pVCpu.
2219 */
2220#ifndef IEM_WITH_SETJMP
2221# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2222 do \
2223 { \
2224 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2225 if (rcStrict2 != VINF_SUCCESS) \
2226 return rcStrict2; \
2227 } while (0)
2228#else /* IEM_WITH_SETJMP */
2229# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2230
2231#endif /* IEM_WITH_SETJMP */
2232
2233#ifndef IEM_WITH_SETJMP
2234
2235/**
2236 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2237 *
2238 * @returns Strict VBox status code.
2239 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2240 * @param pu16 Where to return the opcode word.
2241 */
2242DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2243{
2244 uint8_t u8;
2245 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2246 if (rcStrict == VINF_SUCCESS)
2247 *pu16 = (int8_t)u8;
2248 return rcStrict;
2249}
2250
2251
2252/**
2253 * Fetches the next signed byte from the opcode stream, extending it to
2254 * unsigned 16-bit.
2255 *
2256 * @returns Strict VBox status code.
2257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2258 * @param pu16 Where to return the unsigned word.
2259 */
2260DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2261{
2262 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2263 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2264 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2265
2266 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2267 pVCpu->iem.s.offOpcode = offOpcode + 1;
2268 return VINF_SUCCESS;
2269}
2270
2271#endif /* !IEM_WITH_SETJMP */
2272
2273/**
2274 * Fetches the next signed byte from the opcode stream and sign-extends it to
2275 * a word, returning automatically on failure.
2276 *
2277 * @param a_pu16 Where to return the word.
2278 * @remark Implicitly references pVCpu.
2279 */
2280#ifndef IEM_WITH_SETJMP
2281# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2282 do \
2283 { \
2284 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2285 if (rcStrict2 != VINF_SUCCESS) \
2286 return rcStrict2; \
2287 } while (0)
2288#else
2289# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2290#endif
2291
2292#ifndef IEM_WITH_SETJMP
2293
2294/**
2295 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2296 *
2297 * @returns Strict VBox status code.
2298 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2299 * @param pu32 Where to return the opcode dword.
2300 */
2301DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2302{
2303 uint8_t u8;
2304 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2305 if (rcStrict == VINF_SUCCESS)
2306 *pu32 = (int8_t)u8;
2307 return rcStrict;
2308}
2309
2310
2311/**
2312 * Fetches the next signed byte from the opcode stream, extending it to
2313 * unsigned 32-bit.
2314 *
2315 * @returns Strict VBox status code.
2316 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2317 * @param pu32 Where to return the unsigned dword.
2318 */
2319DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2320{
2321 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2322 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2323 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2324
2325 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2326 pVCpu->iem.s.offOpcode = offOpcode + 1;
2327 return VINF_SUCCESS;
2328}
2329
2330#endif /* !IEM_WITH_SETJMP */
2331
2332/**
2333 * Fetches the next signed byte from the opcode stream and sign-extends it to
2334 * a double word, returning automatically on failure.
2335 *
2336 * @param a_pu32 Where to return the double word.
2337 * @remark Implicitly references pVCpu.
2338 */
2339#ifndef IEM_WITH_SETJMP
2340 # define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2341 do \
2342 { \
2343 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2344 if (rcStrict2 != VINF_SUCCESS) \
2345 return rcStrict2; \
2346 } while (0)
2347#else
2348# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2349#endif
2350
2351#ifndef IEM_WITH_SETJMP
2352
2353/**
2354 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2355 *
2356 * @returns Strict VBox status code.
2357 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2358 * @param pu64 Where to return the opcode qword.
2359 */
2360DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2361{
2362 uint8_t u8;
2363 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2364 if (rcStrict == VINF_SUCCESS)
2365 *pu64 = (int8_t)u8;
2366 return rcStrict;
2367}
2368
2369
2370/**
2371 * Fetches the next signed byte from the opcode stream, extending it to
2372 * unsigned 64-bit.
2373 *
2374 * @returns Strict VBox status code.
2375 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2376 * @param pu64 Where to return the unsigned qword.
2377 */
2378DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2379{
2380 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2381 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2382 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2383
2384 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2385 pVCpu->iem.s.offOpcode = offOpcode + 1;
2386 return VINF_SUCCESS;
2387}
2388
2389#endif /* !IEM_WITH_SETJMP */
2390
2391
2392/**
2393 * Fetches the next signed byte from the opcode stream and sign-extends it to
2394 * a quad word, returning automatically on failure.
2395 *
2396 * @param a_pu64 Where to return the quad word.
2397 * @remark Implicitly references pVCpu.
2398 */
2399#ifndef IEM_WITH_SETJMP
2400# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2401 do \
2402 { \
2403 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2404 if (rcStrict2 != VINF_SUCCESS) \
2405 return rcStrict2; \
2406 } while (0)
2407#else
2408# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2409#endif
2410
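/*
 * Illustrative sketch (editorial addition): the S8_SX_* getters rely on
 * plain C integer conversions for the sign extension.  For example, a
 * displacement byte of 0xFE (-2) widens like this before being added to a
 * 64-bit effective address (the values are just worked numbers):
 */
#if 0 /* sketch only */
    uint8_t  const bDisp    = 0xFE;                     /* raw opcode byte                    */
    uint64_t const uDisp    = (int8_t)bDisp;            /* 0xFFFFFFFFFFFFFFFE after widening  */
    uint64_t const GCPtrEff = UINT64_C(0x1000) + uDisp; /* 0xFFE, i.e. base - 2 (mod 2^64)    */
#endif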
2411
2412#ifndef IEM_WITH_SETJMP
2413
2414/**
2415 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2416 *
2417 * @returns Strict VBox status code.
2418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2419 * @param pu16 Where to return the opcode word.
2420 */
2421DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2422{
2423 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2424 if (rcStrict == VINF_SUCCESS)
2425 {
2426 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2427# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2428 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2429# else
2430 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2431# endif
2432 pVCpu->iem.s.offOpcode = offOpcode + 2;
2433 }
2434 else
2435 *pu16 = 0;
2436 return rcStrict;
2437}
2438
2439
2440/**
2441 * Fetches the next opcode word.
2442 *
2443 * @returns Strict VBox status code.
2444 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2445 * @param pu16 Where to return the opcode word.
2446 */
2447DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2448{
2449 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2450 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2451 {
2452 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2453# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2454 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2455# else
2456 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2457# endif
2458 return VINF_SUCCESS;
2459 }
2460 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2461}
2462
2463#else /* IEM_WITH_SETJMP */
2464
2465/**
2466 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
2467 *
2468 * @returns The opcode word.
2469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2470 */
2471DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2472{
2473# ifdef IEM_WITH_CODE_TLB
2474 uint16_t u16;
2475 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2476 return u16;
2477# else
2478 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2479 if (rcStrict == VINF_SUCCESS)
2480 {
2481 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2482 pVCpu->iem.s.offOpcode += 2;
2483# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2484 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2485# else
2486 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2487# endif
2488 }
2489 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2490# endif
2491}
2492
2493
2494/**
2495 * Fetches the next opcode word, longjmp on error.
2496 *
2497 * @returns The opcode word.
2498 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2499 */
2500DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2501{
2502# ifdef IEM_WITH_CODE_TLB
2503 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2504 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2505 if (RT_LIKELY( pbBuf != NULL
2506 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2507 {
2508 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2509# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2510 return *(uint16_t const *)&pbBuf[offBuf];
2511# else
2512 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2513# endif
2514 }
2515# else
2516 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2517 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2518 {
2519 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2520# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2521 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2522# else
2523 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2524# endif
2525 }
2526# endif
2527 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2528}
2529
2530#endif /* IEM_WITH_SETJMP */
2531
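/*
 * Illustrative sketch (editorial addition): with IEM_USE_UNALIGNED_DATA_ACCESS
 * the getters above read the value with a single, possibly misaligned host
 * load; without it they assemble it byte by byte.  On a little-endian host
 * the two forms produce the same result:
 */
#if 0 /* sketch only */
    uint8_t  const abBytes[2] = { 0x34, 0x12 };
    uint16_t const uDirect    = *(uint16_t const *)&abBytes[0];       /* 0x1234 on little-endian hosts */
    uint16_t const uAssembled = RT_MAKE_U16(abBytes[0], abBytes[1]);  /* 0x1234 regardless of host endianness */
#endif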
2532
2533/**
2534 * Fetches the next opcode word, returns automatically on failure.
2535 *
2536 * @param a_pu16 Where to return the opcode word.
2537 * @remark Implicitly references pVCpu.
2538 */
2539#ifndef IEM_WITH_SETJMP
2540# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2541 do \
2542 { \
2543 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2544 if (rcStrict2 != VINF_SUCCESS) \
2545 return rcStrict2; \
2546 } while (0)
2547#else
2548# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2549#endif
2550
2551#ifndef IEM_WITH_SETJMP
2552
2553/**
2554 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2555 *
2556 * @returns Strict VBox status code.
2557 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2558 * @param pu32 Where to return the opcode double word.
2559 */
2560DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2561{
2562 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2563 if (rcStrict == VINF_SUCCESS)
2564 {
2565 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2566 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2567 pVCpu->iem.s.offOpcode = offOpcode + 2;
2568 }
2569 else
2570 *pu32 = 0;
2571 return rcStrict;
2572}
2573
2574
2575/**
2576 * Fetches the next opcode word, zero extending it to a double word.
2577 *
2578 * @returns Strict VBox status code.
2579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2580 * @param pu32 Where to return the opcode double word.
2581 */
2582DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2583{
2584 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2585 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2586 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2587
2588 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2589 pVCpu->iem.s.offOpcode = offOpcode + 2;
2590 return VINF_SUCCESS;
2591}
2592
2593#endif /* !IEM_WITH_SETJMP */
2594
2595
2596/**
2597 * Fetches the next opcode word and zero extends it to a double word, returns
2598 * automatically on failure.
2599 *
2600 * @param a_pu32 Where to return the opcode double word.
2601 * @remark Implicitly references pVCpu.
2602 */
2603#ifndef IEM_WITH_SETJMP
2604# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2605 do \
2606 { \
2607 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2608 if (rcStrict2 != VINF_SUCCESS) \
2609 return rcStrict2; \
2610 } while (0)
2611#else
2612# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2613#endif
2614
2615#ifndef IEM_WITH_SETJMP
2616
2617/**
2618 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2619 *
2620 * @returns Strict VBox status code.
2621 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2622 * @param pu64 Where to return the opcode quad word.
2623 */
2624DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2625{
2626 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2627 if (rcStrict == VINF_SUCCESS)
2628 {
2629 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2630 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2631 pVCpu->iem.s.offOpcode = offOpcode + 2;
2632 }
2633 else
2634 *pu64 = 0;
2635 return rcStrict;
2636}
2637
2638
2639/**
2640 * Fetches the next opcode word, zero extending it to a quad word.
2641 *
2642 * @returns Strict VBox status code.
2643 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2644 * @param pu64 Where to return the opcode quad word.
2645 */
2646DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2647{
2648 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2649 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2650 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2651
2652 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2653 pVCpu->iem.s.offOpcode = offOpcode + 2;
2654 return VINF_SUCCESS;
2655}
2656
2657#endif /* !IEM_WITH_SETJMP */
2658
2659/**
2660 * Fetches the next opcode word and zero extends it to a quad word, returns
2661 * automatically on failure.
2662 *
2663 * @param a_pu64 Where to return the opcode quad word.
2664 * @remark Implicitly references pVCpu.
2665 */
2666#ifndef IEM_WITH_SETJMP
2667# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2668 do \
2669 { \
2670 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2671 if (rcStrict2 != VINF_SUCCESS) \
2672 return rcStrict2; \
2673 } while (0)
2674#else
2675# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2676#endif
2677
2678
2679#ifndef IEM_WITH_SETJMP
2680/**
2681 * Fetches the next signed word from the opcode stream.
2682 *
2683 * @returns Strict VBox status code.
2684 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2685 * @param pi16 Where to return the signed word.
2686 */
2687DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2688{
2689 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2690}
2691#endif /* !IEM_WITH_SETJMP */
2692
2693
2694/**
2695 * Fetches the next signed word from the opcode stream, returning automatically
2696 * on failure.
2697 *
2698 * @param a_pi16 Where to return the signed word.
2699 * @remark Implicitly references pVCpu.
2700 */
2701#ifndef IEM_WITH_SETJMP
2702# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2703 do \
2704 { \
2705 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2706 if (rcStrict2 != VINF_SUCCESS) \
2707 return rcStrict2; \
2708 } while (0)
2709#else
2710# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2711#endif
2712
2713#ifndef IEM_WITH_SETJMP
2714
2715/**
2716 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2717 *
2718 * @returns Strict VBox status code.
2719 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2720 * @param pu32 Where to return the opcode dword.
2721 */
2722DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2723{
2724 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2725 if (rcStrict == VINF_SUCCESS)
2726 {
2727 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2728# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2729 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2730# else
2731 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2732 pVCpu->iem.s.abOpcode[offOpcode + 1],
2733 pVCpu->iem.s.abOpcode[offOpcode + 2],
2734 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2735# endif
2736 pVCpu->iem.s.offOpcode = offOpcode + 4;
2737 }
2738 else
2739 *pu32 = 0;
2740 return rcStrict;
2741}
2742
2743
2744/**
2745 * Fetches the next opcode dword.
2746 *
2747 * @returns Strict VBox status code.
2748 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2749 * @param pu32 Where to return the opcode double word.
2750 */
2751DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2752{
2753 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2754 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2755 {
2756 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2757# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2758 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2759# else
2760 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2761 pVCpu->iem.s.abOpcode[offOpcode + 1],
2762 pVCpu->iem.s.abOpcode[offOpcode + 2],
2763 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2764# endif
2765 return VINF_SUCCESS;
2766 }
2767 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2768}
2769
2770 #else /* IEM_WITH_SETJMP */
2771
2772/**
2773 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2774 *
2775 * @returns The opcode dword.
2776 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2777 */
2778DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2779{
2780# ifdef IEM_WITH_CODE_TLB
2781 uint32_t u32;
2782 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2783 return u32;
2784# else
2785 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2786 if (rcStrict == VINF_SUCCESS)
2787 {
2788 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2789 pVCpu->iem.s.offOpcode = offOpcode + 4;
2790# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2791 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2792# else
2793 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2794 pVCpu->iem.s.abOpcode[offOpcode + 1],
2795 pVCpu->iem.s.abOpcode[offOpcode + 2],
2796 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2797# endif
2798 }
2799 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2800# endif
2801}
2802
2803
2804/**
2805 * Fetches the next opcode dword, longjmp on error.
2806 *
2807 * @returns The opcode dword.
2808 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2809 */
2810DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2811{
2812# ifdef IEM_WITH_CODE_TLB
2813 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2814 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2815 if (RT_LIKELY( pbBuf != NULL
2816 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2817 {
2818 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2819# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2820 return *(uint32_t const *)&pbBuf[offBuf];
2821# else
2822 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2823 pbBuf[offBuf + 1],
2824 pbBuf[offBuf + 2],
2825 pbBuf[offBuf + 3]);
2826# endif
2827 }
2828# else
2829 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2830 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2831 {
2832 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2833# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2834 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2835# else
2836 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2837 pVCpu->iem.s.abOpcode[offOpcode + 1],
2838 pVCpu->iem.s.abOpcode[offOpcode + 2],
2839 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2840# endif
2841 }
2842# endif
2843 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2844}
2845
2846 #endif /* IEM_WITH_SETJMP */
2847
2848
2849/**
2850 * Fetches the next opcode dword, returns automatically on failure.
2851 *
2852 * @param a_pu32 Where to return the opcode dword.
2853 * @remark Implicitly references pVCpu.
2854 */
2855#ifndef IEM_WITH_SETJMP
2856# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2857 do \
2858 { \
2859 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2860 if (rcStrict2 != VINF_SUCCESS) \
2861 return rcStrict2; \
2862 } while (0)
2863#else
2864# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2865#endif
2866
2867#ifndef IEM_WITH_SETJMP
2868
2869/**
2870 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2871 *
2872 * @returns Strict VBox status code.
2873 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2874 * @param pu64 Where to return the opcode quad word.
2875 */
2876DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2877{
2878 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2879 if (rcStrict == VINF_SUCCESS)
2880 {
2881 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2882 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2883 pVCpu->iem.s.abOpcode[offOpcode + 1],
2884 pVCpu->iem.s.abOpcode[offOpcode + 2],
2885 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2886 pVCpu->iem.s.offOpcode = offOpcode + 4;
2887 }
2888 else
2889 *pu64 = 0;
2890 return rcStrict;
2891}
2892
2893
2894/**
2895 * Fetches the next opcode dword, zero extending it to a quad word.
2896 *
2897 * @returns Strict VBox status code.
2898 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2899 * @param pu64 Where to return the opcode quad word.
2900 */
2901DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2902{
2903 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2904 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2905 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2906
2907 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2908 pVCpu->iem.s.abOpcode[offOpcode + 1],
2909 pVCpu->iem.s.abOpcode[offOpcode + 2],
2910 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2911 pVCpu->iem.s.offOpcode = offOpcode + 4;
2912 return VINF_SUCCESS;
2913}
2914
2915#endif /* !IEM_WITH_SETJMP */
2916
2917
2918/**
2919 * Fetches the next opcode dword and zero extends it to a quad word, returns
2920 * automatically on failure.
2921 *
2922 * @param a_pu64 Where to return the opcode quad word.
2923 * @remark Implicitly references pVCpu.
2924 */
2925#ifndef IEM_WITH_SETJMP
2926# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2927 do \
2928 { \
2929 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2930 if (rcStrict2 != VINF_SUCCESS) \
2931 return rcStrict2; \
2932 } while (0)
2933#else
2934# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2935#endif
2936
2937
2938#ifndef IEM_WITH_SETJMP
2939/**
2940 * Fetches the next signed double word from the opcode stream.
2941 *
2942 * @returns Strict VBox status code.
2943 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2944 * @param pi32 Where to return the signed double word.
2945 */
2946DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2947{
2948 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2949}
2950#endif
2951
2952/**
2953 * Fetches the next signed double word from the opcode stream, returning
2954 * automatically on failure.
2955 *
2956 * @param a_pi32 Where to return the signed double word.
2957 * @remark Implicitly references pVCpu.
2958 */
2959#ifndef IEM_WITH_SETJMP
2960# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2961 do \
2962 { \
2963 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2964 if (rcStrict2 != VINF_SUCCESS) \
2965 return rcStrict2; \
2966 } while (0)
2967#else
2968# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2969#endif
2970
2971#ifndef IEM_WITH_SETJMP
2972
2973/**
2974 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2975 *
2976 * @returns Strict VBox status code.
2977 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2978 * @param pu64 Where to return the opcode qword.
2979 */
2980DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2981{
2982 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2983 if (rcStrict == VINF_SUCCESS)
2984 {
2985 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2986 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2987 pVCpu->iem.s.abOpcode[offOpcode + 1],
2988 pVCpu->iem.s.abOpcode[offOpcode + 2],
2989 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2990 pVCpu->iem.s.offOpcode = offOpcode + 4;
2991 }
2992 else
2993 *pu64 = 0;
2994 return rcStrict;
2995}
2996
2997
2998/**
2999 * Fetches the next opcode dword, sign extending it into a quad word.
3000 *
3001 * @returns Strict VBox status code.
3002 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3003 * @param pu64 Where to return the opcode quad word.
3004 */
3005DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3006{
3007 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3008 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3009 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3010
3011 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3012 pVCpu->iem.s.abOpcode[offOpcode + 1],
3013 pVCpu->iem.s.abOpcode[offOpcode + 2],
3014 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3015 *pu64 = i32;
3016 pVCpu->iem.s.offOpcode = offOpcode + 4;
3017 return VINF_SUCCESS;
3018}
3019
3020#endif /* !IEM_WITH_SETJMP */
3021
3022
3023/**
3024 * Fetches the next opcode double word and sign extends it to a quad word,
3025 * returns automatically on failure.
3026 *
3027 * @param a_pu64 Where to return the opcode quad word.
3028 * @remark Implicitly references pVCpu.
3029 */
3030#ifndef IEM_WITH_SETJMP
3031# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3032 do \
3033 { \
3034 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3035 if (rcStrict2 != VINF_SUCCESS) \
3036 return rcStrict2; \
3037 } while (0)
3038#else
3039# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3040#endif
3041
3042#ifndef IEM_WITH_SETJMP
3043
3044/**
3045 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3046 *
3047 * @returns Strict VBox status code.
3048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3049 * @param pu64 Where to return the opcode qword.
3050 */
3051DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3052{
3053 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3054 if (rcStrict == VINF_SUCCESS)
3055 {
3056 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3057# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3058 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3059# else
3060 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3061 pVCpu->iem.s.abOpcode[offOpcode + 1],
3062 pVCpu->iem.s.abOpcode[offOpcode + 2],
3063 pVCpu->iem.s.abOpcode[offOpcode + 3],
3064 pVCpu->iem.s.abOpcode[offOpcode + 4],
3065 pVCpu->iem.s.abOpcode[offOpcode + 5],
3066 pVCpu->iem.s.abOpcode[offOpcode + 6],
3067 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3068# endif
3069 pVCpu->iem.s.offOpcode = offOpcode + 8;
3070 }
3071 else
3072 *pu64 = 0;
3073 return rcStrict;
3074}
3075
3076
3077/**
3078 * Fetches the next opcode qword.
3079 *
3080 * @returns Strict VBox status code.
3081 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3082 * @param pu64 Where to return the opcode qword.
3083 */
3084DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3085{
3086 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3087 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3088 {
3089# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3090 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3091# else
3092 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3093 pVCpu->iem.s.abOpcode[offOpcode + 1],
3094 pVCpu->iem.s.abOpcode[offOpcode + 2],
3095 pVCpu->iem.s.abOpcode[offOpcode + 3],
3096 pVCpu->iem.s.abOpcode[offOpcode + 4],
3097 pVCpu->iem.s.abOpcode[offOpcode + 5],
3098 pVCpu->iem.s.abOpcode[offOpcode + 6],
3099 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3100# endif
3101 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3102 return VINF_SUCCESS;
3103 }
3104 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3105}
3106
3107#else /* IEM_WITH_SETJMP */
3108
3109/**
3110 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3111 *
3112 * @returns The opcode qword.
3113 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3114 */
3115DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3116{
3117# ifdef IEM_WITH_CODE_TLB
3118 uint64_t u64;
3119 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3120 return u64;
3121# else
3122 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3123 if (rcStrict == VINF_SUCCESS)
3124 {
3125 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3126 pVCpu->iem.s.offOpcode = offOpcode + 8;
3127# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3128 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3129# else
3130 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3131 pVCpu->iem.s.abOpcode[offOpcode + 1],
3132 pVCpu->iem.s.abOpcode[offOpcode + 2],
3133 pVCpu->iem.s.abOpcode[offOpcode + 3],
3134 pVCpu->iem.s.abOpcode[offOpcode + 4],
3135 pVCpu->iem.s.abOpcode[offOpcode + 5],
3136 pVCpu->iem.s.abOpcode[offOpcode + 6],
3137 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3138# endif
3139 }
3140 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3141# endif
3142}
3143
3144
3145/**
3146 * Fetches the next opcode qword, longjmp on error.
3147 *
3148 * @returns The opcode qword.
3149 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3150 */
3151DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3152{
3153# ifdef IEM_WITH_CODE_TLB
3154 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3155 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3156 if (RT_LIKELY( pbBuf != NULL
3157 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3158 {
3159 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3160# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3161 return *(uint64_t const *)&pbBuf[offBuf];
3162# else
3163 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3164 pbBuf[offBuf + 1],
3165 pbBuf[offBuf + 2],
3166 pbBuf[offBuf + 3],
3167 pbBuf[offBuf + 4],
3168 pbBuf[offBuf + 5],
3169 pbBuf[offBuf + 6],
3170 pbBuf[offBuf + 7]);
3171# endif
3172 }
3173# else
3174 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3175 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3176 {
3177 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3178# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3179 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3180# else
3181 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3182 pVCpu->iem.s.abOpcode[offOpcode + 1],
3183 pVCpu->iem.s.abOpcode[offOpcode + 2],
3184 pVCpu->iem.s.abOpcode[offOpcode + 3],
3185 pVCpu->iem.s.abOpcode[offOpcode + 4],
3186 pVCpu->iem.s.abOpcode[offOpcode + 5],
3187 pVCpu->iem.s.abOpcode[offOpcode + 6],
3188 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3189# endif
3190 }
3191# endif
3192 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3193}
3194
3195#endif /* IEM_WITH_SETJMP */
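/*
 * Note on the fast paths above (sketch): with IEM_USE_UNALIGNED_DATA_ACCESS the
 * qword is read straight out of the opcode buffer, otherwise it is reassembled
 * byte by byte in little-endian order, e.g. the byte sequence
 * 01 02 03 04 05 06 07 08 yields 0x0807060504030201.
 */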
3196
3197/**
3198 * Fetches the next opcode quad word, returns automatically on failure.
3199 *
3200 * @param a_pu64 Where to return the opcode quad word.
3201 * @remark Implicitly references pVCpu.
3202 */
3203#ifndef IEM_WITH_SETJMP
3204# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3205 do \
3206 { \
3207 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3208 if (rcStrict2 != VINF_SUCCESS) \
3209 return rcStrict2; \
3210 } while (0)
3211#else
3212# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3213#endif
3214
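/*
 * Illustrative sketch (not part of the build): how a decoder helper typically
 * consumes IEM_OPCODE_GET_NEXT_U64.  The helper name is hypothetical; the early
 * return (or longjmp) on fetch failure is supplied by the macro variants above.
 *
 *      IEM_STATIC VBOXSTRICTRC iemOpExampleFetchImmQword(PVMCPU pVCpu, uint64_t *puImm)
 *      {
 *          IEM_OPCODE_GET_NEXT_U64(puImm);   // returns / longjmps on fetch failure
 *          return VINF_SUCCESS;              // *puImm now holds the 8-byte immediate
 *      }
 */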
3215
3216/** @name Misc Worker Functions.
3217 * @{
3218 */
3219
3220/**
3221 * Gets the exception class for the specified exception vector.
3222 *
3223 * @returns The class of the specified exception.
3224 * @param uVector The exception vector.
3225 */
3226IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3227{
3228 Assert(uVector <= X86_XCPT_LAST);
3229 switch (uVector)
3230 {
3231 case X86_XCPT_DE:
3232 case X86_XCPT_TS:
3233 case X86_XCPT_NP:
3234 case X86_XCPT_SS:
3235 case X86_XCPT_GP:
3236 case X86_XCPT_SX: /* AMD only */
3237 return IEMXCPTCLASS_CONTRIBUTORY;
3238
3239 case X86_XCPT_PF:
3240 case X86_XCPT_VE: /* Intel only */
3241 return IEMXCPTCLASS_PAGE_FAULT;
3242
3243 case X86_XCPT_DF:
3244 return IEMXCPTCLASS_DOUBLE_FAULT;
3245 }
3246 return IEMXCPTCLASS_BENIGN;
3247}
3248
3249
3250/**
3251 * Evaluates how to handle an exception caused during delivery of another event
3252 * (exception / interrupt).
3253 *
3254 * @returns How to handle the recursive exception.
3255 * @param pVCpu The cross context virtual CPU structure of the
3256 * calling thread.
3257 * @param fPrevFlags The flags of the previous event.
3258 * @param uPrevVector The vector of the previous event.
3259 * @param fCurFlags The flags of the current exception.
3260 * @param uCurVector The vector of the current exception.
3261 * @param pfXcptRaiseInfo Where to store additional information about the
3262 * exception condition. Optional.
3263 */
3264VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3265 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3266{
3267 /*
3268 * Only CPU exceptions can be raised while delivering other events; software interrupt
3269 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3270 */
3271 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3272 Assert(pVCpu); RT_NOREF(pVCpu);
3273 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3274
3275 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3276 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3277 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3278 {
3279 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3280 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3281 {
3282 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3283 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3284 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3285 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3286 {
3287 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3288 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3289 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3290 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3291 uCurVector, IEM_GET_CTX(pVCpu)->cr2));
3292 }
3293 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3294 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3295 {
3296 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3297 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%u uCurVector=%u -> #DF\n", uPrevVector, uCurVector));
3298 }
3299 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3300 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3301 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3302 {
3303 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3304 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3305 }
3306 }
3307 else
3308 {
3309 if (uPrevVector == X86_XCPT_NMI)
3310 {
3311 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3312 if (uCurVector == X86_XCPT_PF)
3313 {
3314 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3315 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3316 }
3317 }
3318 else if ( uPrevVector == X86_XCPT_AC
3319 && uCurVector == X86_XCPT_AC)
3320 {
3321 enmRaise = IEMXCPTRAISE_CPU_HANG;
3322 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3323 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3324 }
3325 }
3326 }
3327 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3328 {
3329 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3330 if (uCurVector == X86_XCPT_PF)
3331 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3332 }
3333 else
3334 {
3335 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3336 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3337 }
3338
3339 if (pfXcptRaiseInfo)
3340 *pfXcptRaiseInfo = fRaiseInfo;
3341 return enmRaise;
3342}
3343
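/*
 * Illustrative sketch (not part of the build): a caller that hit a #GP while
 * delivering a #PF would typically consult the helper above like this (the
 * local names are hypothetical).
 *
 *      IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
 *      IEMXCPTRAISE     enmRaise   = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                             IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                             IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                             &fRaiseInfo);
 *      // enmRaise is IEMXCPTRAISE_DOUBLE_FAULT here (page fault followed by a contributory
 *      // exception) and fRaiseInfo has IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT set.
 */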
3344
3345/**
3346 * Enters the CPU shutdown state initiated by a triple fault or other
3347 * unrecoverable conditions.
3348 *
3349 * @returns Strict VBox status code.
3350 * @param pVCpu The cross context virtual CPU structure of the
3351 * calling thread.
3352 */
3353IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3354{
3355 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3356 {
3357 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3358 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3359 }
3360
3361 RT_NOREF(pVCpu);
3362 return VINF_EM_TRIPLE_FAULT;
3363}
3364
3365
3366/**
3367 * Validates a new SS segment.
3368 *
3369 * @returns VBox strict status code.
3370 * @param pVCpu The cross context virtual CPU structure of the
3371 * calling thread.
3372 * @param pCtx The CPU context.
3373 * @param NewSS The new SS selector.
3374 * @param uCpl The CPL to load the stack for.
3375 * @param pDesc Where to return the descriptor.
3376 */
3377IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3378{
3379 NOREF(pCtx);
3380
3381 /* Null selectors are not allowed (we're not called for dispatching
3382 interrupts with SS=0 in long mode). */
3383 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3384 {
3385 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3386 return iemRaiseTaskSwitchFault0(pVCpu);
3387 }
3388
3389 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3390 if ((NewSS & X86_SEL_RPL) != uCpl)
3391 {
3392 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3393 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3394 }
3395
3396 /*
3397 * Read the descriptor.
3398 */
3399 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3400 if (rcStrict != VINF_SUCCESS)
3401 return rcStrict;
3402
3403 /*
3404 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3405 */
3406 if (!pDesc->Legacy.Gen.u1DescType)
3407 {
3408 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3409 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3410 }
3411
3412 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3413 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3414 {
3415 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3416 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3417 }
3418 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3419 {
3420 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3421 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3422 }
3423
3424 /* Is it there? */
3425 /** @todo testcase: Is this checked before the canonical / limit check below? */
3426 if (!pDesc->Legacy.Gen.u1Present)
3427 {
3428 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3429 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3430 }
3431
3432 return VINF_SUCCESS;
3433}
3434
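/*
 * Illustrative sketch (not part of the build): the typical calling sequence in
 * a stack-switching path, assuming hypothetical locals NewSS and uNewCpl.
 *
 *      IEMSELDESC   DescSS;
 *      VBOXSTRICTRC rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      // DescSS now describes a present, writable data segment with DPL == uNewCpl.
 */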
3435
3436/**
3437 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3438 * not.
3439 *
3440 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3441 * @param a_pCtx The CPU context.
3442 */
3443#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3444# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3445 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3446 ? (a_pCtx)->eflags.u \
3447 : CPUMRawGetEFlags(a_pVCpu) )
3448#else
3449# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3450 ( (a_pCtx)->eflags.u )
3451#endif
3452
3453/**
3454 * Updates the EFLAGS in the correct manner wrt. PATM.
3455 *
3456 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3457 * @param a_pCtx The CPU context.
3458 * @param a_fEfl The new EFLAGS.
3459 */
3460#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3461# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3462 do { \
3463 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3464 (a_pCtx)->eflags.u = (a_fEfl); \
3465 else \
3466 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3467 } while (0)
3468#else
3469# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3470 do { \
3471 (a_pCtx)->eflags.u = (a_fEfl); \
3472 } while (0)
3473#endif
3474
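/*
 * Illustrative sketch (not part of the build): the usual read-modify-write
 * pattern with the two macros above, mirroring what the real-mode exception
 * dispatcher further down does when masking IF/TF/AC.
 *
 *      uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
 *      fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
 *      IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
 */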
3475
3476/** @} */
3477
3478/** @name Raising Exceptions.
3479 *
3480 * @{
3481 */
3482
3483
3484/**
3485 * Loads the specified stack far pointer from the TSS.
3486 *
3487 * @returns VBox strict status code.
3488 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3489 * @param pCtx The CPU context.
3490 * @param uCpl The CPL to load the stack for.
3491 * @param pSelSS Where to return the new stack segment.
3492 * @param puEsp Where to return the new stack pointer.
3493 */
3494IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3495 PRTSEL pSelSS, uint32_t *puEsp)
3496{
3497 VBOXSTRICTRC rcStrict;
3498 Assert(uCpl < 4);
3499
3500 switch (pCtx->tr.Attr.n.u4Type)
3501 {
3502 /*
3503 * 16-bit TSS (X86TSS16).
3504 */
3505 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3506 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3507 {
3508 uint32_t off = uCpl * 4 + 2;
3509 if (off + 4 <= pCtx->tr.u32Limit)
3510 {
3511 /** @todo check actual access pattern here. */
3512 uint32_t u32Tmp = 0; /* gcc maybe-uninitialized paranoia */
3513 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3514 if (rcStrict == VINF_SUCCESS)
3515 {
3516 *puEsp = RT_LOWORD(u32Tmp);
3517 *pSelSS = RT_HIWORD(u32Tmp);
3518 return VINF_SUCCESS;
3519 }
3520 }
3521 else
3522 {
3523 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3524 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3525 }
3526 break;
3527 }
3528
3529 /*
3530 * 32-bit TSS (X86TSS32).
3531 */
3532 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3533 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3534 {
3535 uint32_t off = uCpl * 8 + 4;
3536 if (off + 7 <= pCtx->tr.u32Limit)
3537 {
3538/** @todo check actual access pattern here. */
3539 uint64_t u64Tmp;
3540 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3541 if (rcStrict == VINF_SUCCESS)
3542 {
3543 *puEsp = u64Tmp & UINT32_MAX;
3544 *pSelSS = (RTSEL)(u64Tmp >> 32);
3545 return VINF_SUCCESS;
3546 }
3547 }
3548 else
3549 {
3550 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3551 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3552 }
3553 break;
3554 }
3555
3556 default:
3557 AssertFailed();
3558 rcStrict = VERR_IEM_IPE_4;
3559 break;
3560 }
3561
3562 *puEsp = 0; /* make gcc happy */
3563 *pSelSS = 0; /* make gcc happy */
3564 return rcStrict;
3565}
3566
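/*
 * Worked example (sketch) for the offset arithmetic above, per the X86TSS16
 * and X86TSS32 layouts: for uCpl=1 the 16-bit path reads ss1:sp1 from offset
 * 1*4 + 2 = 6, while the 32-bit path reads ss1:esp1 from offset 1*8 + 4 = 12.
 */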
3567
3568/**
3569 * Loads the specified stack pointer from the 64-bit TSS.
3570 *
3571 * @returns VBox strict status code.
3572 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3573 * @param pCtx The CPU context.
3574 * @param uCpl The CPL to load the stack for.
3575 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3576 * @param puRsp Where to return the new stack pointer.
3577 */
3578IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3579{
3580 Assert(uCpl < 4);
3581 Assert(uIst < 8);
3582 *puRsp = 0; /* make gcc happy */
3583
3584 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3585
3586 uint32_t off;
3587 if (uIst)
3588 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3589 else
3590 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3591 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3592 {
3593 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3594 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3595 }
3596
3597 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3598}
3599
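/*
 * Worked example (sketch) for the offset arithmetic above: with uIst=2 the
 * stack pointer is read from RT_OFFSETOF(X86TSS64, ist1) + 8 (i.e. IST2),
 * while with uIst=0 and uCpl=1 it comes from RT_OFFSETOF(X86TSS64, rsp0) + 8
 * (i.e. RSP1).
 */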
3600
3601/**
3602 * Adjust the CPU state according to the exception being raised.
3603 *
3604 * @param pCtx The CPU context.
3605 * @param u8Vector The exception that has been raised.
3606 */
3607DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3608{
3609 switch (u8Vector)
3610 {
3611 case X86_XCPT_DB:
3612 pCtx->dr[7] &= ~X86_DR7_GD;
3613 break;
3614 /** @todo Read the AMD and Intel exception reference... */
3615 }
3616}
3617
3618
3619/**
3620 * Implements exceptions and interrupts for real mode.
3621 *
3622 * @returns VBox strict status code.
3623 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3624 * @param pCtx The CPU context.
3625 * @param cbInstr The number of bytes to offset rIP by in the return
3626 * address.
3627 * @param u8Vector The interrupt / exception vector number.
3628 * @param fFlags The flags.
3629 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3630 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3631 */
3632IEM_STATIC VBOXSTRICTRC
3633iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3634 PCPUMCTX pCtx,
3635 uint8_t cbInstr,
3636 uint8_t u8Vector,
3637 uint32_t fFlags,
3638 uint16_t uErr,
3639 uint64_t uCr2)
3640{
3641 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3642 NOREF(uErr); NOREF(uCr2);
3643
3644 /*
3645 * Read the IDT entry.
3646 */
3647 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3648 {
3649 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3650 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3651 }
3652 RTFAR16 Idte;
3653 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3654 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3655 return rcStrict;
3656
3657 /*
3658 * Push the stack frame.
3659 */
3660 uint16_t *pu16Frame;
3661 uint64_t uNewRsp;
3662 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3663 if (rcStrict != VINF_SUCCESS)
3664 return rcStrict;
3665
3666 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3667#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3668 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3669 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3670 fEfl |= UINT16_C(0xf000);
3671#endif
3672 pu16Frame[2] = (uint16_t)fEfl;
3673 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3674 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3675 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3676 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3677 return rcStrict;
3678
3679 /*
3680 * Load the vector address into cs:ip and make exception specific state
3681 * adjustments.
3682 */
3683 pCtx->cs.Sel = Idte.sel;
3684 pCtx->cs.ValidSel = Idte.sel;
3685 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3686 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3687 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3688 pCtx->rip = Idte.off;
3689 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3690 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3691
3692 /** @todo do we actually do this in real mode? */
3693 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3694 iemRaiseXcptAdjustState(pCtx, u8Vector);
3695
3696 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3697}
3698
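/*
 * Worked example (sketch) for the real-mode path above: for vector 0x08 the
 * far pointer is fetched from idtr.pIdt + 4*8 = +32 (after checking that
 * cbIdt covers it), the 6-byte frame is pushed as FLAGS (pu16Frame[2]),
 * CS (pu16Frame[1]) and IP (pu16Frame[0]), and the new CS base becomes
 * Idte.sel << 4 with RIP set to Idte.off.
 */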
3699
3700/**
3701 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3702 *
3703 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3704 * @param pSReg Pointer to the segment register.
3705 */
3706IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3707{
3708 pSReg->Sel = 0;
3709 pSReg->ValidSel = 0;
3710 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3711 {
3712 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
3713 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3714 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3715 }
3716 else
3717 {
3718 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3719 /** @todo check this on AMD-V */
3720 pSReg->u64Base = 0;
3721 pSReg->u32Limit = 0;
3722 }
3723}
3724
3725
3726/**
3727 * Loads a segment selector during a task switch in V8086 mode.
3728 *
3729 * @param pSReg Pointer to the segment register.
3730 * @param uSel The selector value to load.
3731 */
3732IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3733{
3734 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3735 pSReg->Sel = uSel;
3736 pSReg->ValidSel = uSel;
3737 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3738 pSReg->u64Base = uSel << 4;
3739 pSReg->u32Limit = 0xffff;
3740 pSReg->Attr.u = 0xf3;
3741}
3742
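/*
 * Worked example (sketch): loading uSel=0x1234 with the helper above yields
 * u64Base=0x12340, u32Limit=0xffff and Attr.u=0xf3 (present, DPL=3, accessed
 * read/write data), i.e. the fixed V8086 segment shape.
 */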
3743
3744/**
3745 * Loads a NULL data selector into a selector register, both the hidden and
3746 * visible parts, in protected mode.
3747 *
3748 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3749 * @param pSReg Pointer to the segment register.
3750 * @param uRpl The RPL.
3751 */
3752IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3753{
3754 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3755 * data selector in protected mode. */
3756 pSReg->Sel = uRpl;
3757 pSReg->ValidSel = uRpl;
3758 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3759 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3760 {
3761 /* VT-x (Intel 3960x) observed doing something like this. */
3762 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3763 pSReg->u32Limit = UINT32_MAX;
3764 pSReg->u64Base = 0;
3765 }
3766 else
3767 {
3768 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3769 pSReg->u32Limit = 0;
3770 pSReg->u64Base = 0;
3771 }
3772}
3773
3774
3775/**
3776 * Loads a segment selector during a task switch in protected mode.
3777 *
3778 * In this task switch scenario, we would throw \#TS exceptions rather than
3779 * \#GPs.
3780 *
3781 * @returns VBox strict status code.
3782 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3783 * @param pSReg Pointer to the segment register.
3784 * @param uSel The new selector value.
3785 *
3786 * @remarks This does _not_ handle CS or SS.
3787 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3788 */
3789IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3790{
3791 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3792
3793 /* Null data selector. */
3794 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3795 {
3796 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3797 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3798 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3799 return VINF_SUCCESS;
3800 }
3801
3802 /* Fetch the descriptor. */
3803 IEMSELDESC Desc;
3804 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3805 if (rcStrict != VINF_SUCCESS)
3806 {
3807 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3808 VBOXSTRICTRC_VAL(rcStrict)));
3809 return rcStrict;
3810 }
3811
3812 /* Must be a data segment or readable code segment. */
3813 if ( !Desc.Legacy.Gen.u1DescType
3814 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3815 {
3816 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3817 Desc.Legacy.Gen.u4Type));
3818 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3819 }
3820
3821 /* Check privileges for data segments and non-conforming code segments. */
3822 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3823 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3824 {
3825 /* The RPL and the new CPL must be less than or equal to the DPL. */
3826 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3827 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3828 {
3829 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3830 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3831 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3832 }
3833 }
3834
3835 /* Is it there? */
3836 if (!Desc.Legacy.Gen.u1Present)
3837 {
3838 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3839 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3840 }
3841
3842 /* The base and limit. */
3843 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3844 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3845
3846 /*
3847 * Ok, everything checked out fine. Now set the accessed bit before
3848 * committing the result into the registers.
3849 */
3850 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3851 {
3852 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3853 if (rcStrict != VINF_SUCCESS)
3854 return rcStrict;
3855 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3856 }
3857
3858 /* Commit */
3859 pSReg->Sel = uSel;
3860 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3861 pSReg->u32Limit = cbLimit;
3862 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3863 pSReg->ValidSel = uSel;
3864 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3865 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3866 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3867
3868 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3869 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3870 return VINF_SUCCESS;
3871}
3872
3873
3874/**
3875 * Performs a task switch.
3876 *
3877 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3878 * caller is responsible for performing the necessary checks (like DPL, TSS
3879 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3880 * reference for JMP, CALL, IRET.
3881 *
3882 * If the task switch is due to a software interrupt or hardware exception,
3883 * the caller is responsible for validating the TSS selector and descriptor. See
3884 * Intel Instruction reference for INT n.
3885 *
3886 * @returns VBox strict status code.
3887 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3888 * @param pCtx The CPU context.
3889 * @param enmTaskSwitch What caused this task switch.
3890 * @param uNextEip The EIP effective after the task switch.
3891 * @param fFlags The flags.
3892 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3893 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3894 * @param SelTSS The TSS selector of the new task.
3895 * @param pNewDescTSS Pointer to the new TSS descriptor.
3896 */
3897IEM_STATIC VBOXSTRICTRC
3898iemTaskSwitch(PVMCPU pVCpu,
3899 PCPUMCTX pCtx,
3900 IEMTASKSWITCH enmTaskSwitch,
3901 uint32_t uNextEip,
3902 uint32_t fFlags,
3903 uint16_t uErr,
3904 uint64_t uCr2,
3905 RTSEL SelTSS,
3906 PIEMSELDESC pNewDescTSS)
3907{
3908 Assert(!IEM_IS_REAL_MODE(pVCpu));
3909 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3910
3911 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3912 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3913 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3914 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3915 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3916
3917 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3918 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3919
3920 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3921 fIsNewTSS386, pCtx->eip, uNextEip));
3922
3923 /* Update CR2 in case it's a page-fault. */
3924 /** @todo This should probably be done much earlier in IEM/PGM. See
3925 * @bugref{5653#c49}. */
3926 if (fFlags & IEM_XCPT_FLAGS_CR2)
3927 pCtx->cr2 = uCr2;
3928
3929 /*
3930 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3931 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3932 */
3933 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3934 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3935 if (uNewTSSLimit < uNewTSSLimitMin)
3936 {
3937 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3938 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3939 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3940 }
3941
3942 /*
3943 * Check the current TSS limit. The last data written to the current TSS during the
3944 * task switch is 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3945 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3946 *
3947 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3948 * end up with smaller than "legal" TSS limits.
3949 */
3950 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3951 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3952 if (uCurTSSLimit < uCurTSSLimitMin)
3953 {
3954 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3955 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3956 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3957 }
3958
3959 /*
3960 * Verify that the new TSS can be accessed and map it. Map only the required contents
3961 * and not the entire TSS.
3962 */
3963 void *pvNewTSS;
3964 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
3965 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
3966 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
3967 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
3968 * not perform correct translation if this happens. See Intel spec. 7.2.1
3969 * "Task-State Segment" */
3970 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
3971 if (rcStrict != VINF_SUCCESS)
3972 {
3973 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
3974 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
3975 return rcStrict;
3976 }
3977
3978 /*
3979 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
3980 */
3981 uint32_t u32EFlags = pCtx->eflags.u32;
3982 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
3983 || enmTaskSwitch == IEMTASKSWITCH_IRET)
3984 {
3985 PX86DESC pDescCurTSS;
3986 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
3987 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3988 if (rcStrict != VINF_SUCCESS)
3989 {
3990 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3991 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3992 return rcStrict;
3993 }
3994
3995 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3996 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
3997 if (rcStrict != VINF_SUCCESS)
3998 {
3999 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4000 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4001 return rcStrict;
4002 }
4003
4004 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4005 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4006 {
4007 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4008 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4009 u32EFlags &= ~X86_EFL_NT;
4010 }
4011 }
4012
4013 /*
4014 * Save the CPU state into the current TSS.
4015 */
4016 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
4017 if (GCPtrNewTSS == GCPtrCurTSS)
4018 {
4019 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4020 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4021 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
4022 }
4023 if (fIsNewTSS386)
4024 {
4025 /*
4026 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4027 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4028 */
4029 void *pvCurTSS32;
4030 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
4031 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
4032 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4033 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4034 if (rcStrict != VINF_SUCCESS)
4035 {
4036 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4037 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4038 return rcStrict;
4039 }
4040
4041 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS+cbCurTSS). */
4042 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4043 pCurTSS32->eip = uNextEip;
4044 pCurTSS32->eflags = u32EFlags;
4045 pCurTSS32->eax = pCtx->eax;
4046 pCurTSS32->ecx = pCtx->ecx;
4047 pCurTSS32->edx = pCtx->edx;
4048 pCurTSS32->ebx = pCtx->ebx;
4049 pCurTSS32->esp = pCtx->esp;
4050 pCurTSS32->ebp = pCtx->ebp;
4051 pCurTSS32->esi = pCtx->esi;
4052 pCurTSS32->edi = pCtx->edi;
4053 pCurTSS32->es = pCtx->es.Sel;
4054 pCurTSS32->cs = pCtx->cs.Sel;
4055 pCurTSS32->ss = pCtx->ss.Sel;
4056 pCurTSS32->ds = pCtx->ds.Sel;
4057 pCurTSS32->fs = pCtx->fs.Sel;
4058 pCurTSS32->gs = pCtx->gs.Sel;
4059
4060 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4061 if (rcStrict != VINF_SUCCESS)
4062 {
4063 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4064 VBOXSTRICTRC_VAL(rcStrict)));
4065 return rcStrict;
4066 }
4067 }
4068 else
4069 {
4070 /*
4071 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4072 */
4073 void *pvCurTSS16;
4074 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
4075 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
4076 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4077 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4078 if (rcStrict != VINF_SUCCESS)
4079 {
4080 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4081 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4082 return rcStrict;
4083 }
4084
4085 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS+cbCurTSS). */
4086 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4087 pCurTSS16->ip = uNextEip;
4088 pCurTSS16->flags = u32EFlags;
4089 pCurTSS16->ax = pCtx->ax;
4090 pCurTSS16->cx = pCtx->cx;
4091 pCurTSS16->dx = pCtx->dx;
4092 pCurTSS16->bx = pCtx->bx;
4093 pCurTSS16->sp = pCtx->sp;
4094 pCurTSS16->bp = pCtx->bp;
4095 pCurTSS16->si = pCtx->si;
4096 pCurTSS16->di = pCtx->di;
4097 pCurTSS16->es = pCtx->es.Sel;
4098 pCurTSS16->cs = pCtx->cs.Sel;
4099 pCurTSS16->ss = pCtx->ss.Sel;
4100 pCurTSS16->ds = pCtx->ds.Sel;
4101
4102 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4103 if (rcStrict != VINF_SUCCESS)
4104 {
4105 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4106 VBOXSTRICTRC_VAL(rcStrict)));
4107 return rcStrict;
4108 }
4109 }
4110
4111 /*
4112 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4113 */
4114 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4115 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4116 {
4117 /* Whether it's a 16 or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
4118 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4119 pNewTSS->selPrev = pCtx->tr.Sel;
4120 }
4121
4122 /*
4123 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4124 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4125 */
4126 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4127 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4128 bool fNewDebugTrap;
4129 if (fIsNewTSS386)
4130 {
4131 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4132 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4133 uNewEip = pNewTSS32->eip;
4134 uNewEflags = pNewTSS32->eflags;
4135 uNewEax = pNewTSS32->eax;
4136 uNewEcx = pNewTSS32->ecx;
4137 uNewEdx = pNewTSS32->edx;
4138 uNewEbx = pNewTSS32->ebx;
4139 uNewEsp = pNewTSS32->esp;
4140 uNewEbp = pNewTSS32->ebp;
4141 uNewEsi = pNewTSS32->esi;
4142 uNewEdi = pNewTSS32->edi;
4143 uNewES = pNewTSS32->es;
4144 uNewCS = pNewTSS32->cs;
4145 uNewSS = pNewTSS32->ss;
4146 uNewDS = pNewTSS32->ds;
4147 uNewFS = pNewTSS32->fs;
4148 uNewGS = pNewTSS32->gs;
4149 uNewLdt = pNewTSS32->selLdt;
4150 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4151 }
4152 else
4153 {
4154 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4155 uNewCr3 = 0;
4156 uNewEip = pNewTSS16->ip;
4157 uNewEflags = pNewTSS16->flags;
4158 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4159 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4160 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4161 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4162 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4163 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4164 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4165 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4166 uNewES = pNewTSS16->es;
4167 uNewCS = pNewTSS16->cs;
4168 uNewSS = pNewTSS16->ss;
4169 uNewDS = pNewTSS16->ds;
4170 uNewFS = 0;
4171 uNewGS = 0;
4172 uNewLdt = pNewTSS16->selLdt;
4173 fNewDebugTrap = false;
4174 }
4175
4176 if (GCPtrNewTSS == GCPtrCurTSS)
4177 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4178 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4179
4180 /*
4181 * We're done accessing the new TSS.
4182 */
4183 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4184 if (rcStrict != VINF_SUCCESS)
4185 {
4186 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4187 return rcStrict;
4188 }
4189
4190 /*
4191 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4192 */
4193 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4194 {
4195 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4196 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4197 if (rcStrict != VINF_SUCCESS)
4198 {
4199 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4200 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4201 return rcStrict;
4202 }
4203
4204 /* Check that the descriptor indicates the new TSS is available (not busy). */
4205 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4206 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4207 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4208
4209 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4210 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4211 if (rcStrict != VINF_SUCCESS)
4212 {
4213 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4214 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4215 return rcStrict;
4216 }
4217 }
4218
4219 /*
4220 * From this point on, we're technically in the new task. Exceptions are deferred
4221 * until the task switch completes, but are raised before any instruction of the new task executes.
4222 */
4223 pCtx->tr.Sel = SelTSS;
4224 pCtx->tr.ValidSel = SelTSS;
4225 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
4226 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4227 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4228 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4229 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4230
4231 /* Set the busy bit in TR. */
4232 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4233 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4234 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4235 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4236 {
4237 uNewEflags |= X86_EFL_NT;
4238 }
4239
4240 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4241 pCtx->cr0 |= X86_CR0_TS;
4242 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4243
4244 pCtx->eip = uNewEip;
4245 pCtx->eax = uNewEax;
4246 pCtx->ecx = uNewEcx;
4247 pCtx->edx = uNewEdx;
4248 pCtx->ebx = uNewEbx;
4249 pCtx->esp = uNewEsp;
4250 pCtx->ebp = uNewEbp;
4251 pCtx->esi = uNewEsi;
4252 pCtx->edi = uNewEdi;
4253
4254 uNewEflags &= X86_EFL_LIVE_MASK;
4255 uNewEflags |= X86_EFL_RA1_MASK;
4256 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
4257
4258 /*
4259 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4260 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4261 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4262 */
4263 pCtx->es.Sel = uNewES;
4264 pCtx->es.Attr.u &= ~X86DESCATTR_P;
4265
4266 pCtx->cs.Sel = uNewCS;
4267 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
4268
4269 pCtx->ss.Sel = uNewSS;
4270 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
4271
4272 pCtx->ds.Sel = uNewDS;
4273 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
4274
4275 pCtx->fs.Sel = uNewFS;
4276 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
4277
4278 pCtx->gs.Sel = uNewGS;
4279 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
4280 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4281
4282 pCtx->ldtr.Sel = uNewLdt;
4283 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4284 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
4285 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4286
4287 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4288 {
4289 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
4290 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
4291 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
4292 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
4293 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
4294 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
4295 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4296 }
4297
4298 /*
4299 * Switch CR3 for the new task.
4300 */
4301 if ( fIsNewTSS386
4302 && (pCtx->cr0 & X86_CR0_PG))
4303 {
4304 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4305 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4306 {
4307 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4308 AssertRCSuccessReturn(rc, rc);
4309 }
4310 else
4311 pCtx->cr3 = uNewCr3;
4312
4313 /* Inform PGM. */
4314 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4315 {
4316 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4317 AssertRCReturn(rc, rc);
4318 /* ignore informational status codes */
4319 }
4320 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4321 }
4322
4323 /*
4324 * Switch LDTR for the new task.
4325 */
4326 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4327 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4328 else
4329 {
4330 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4331
4332 IEMSELDESC DescNewLdt;
4333 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4334 if (rcStrict != VINF_SUCCESS)
4335 {
4336 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4337 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4338 return rcStrict;
4339 }
4340 if ( !DescNewLdt.Legacy.Gen.u1Present
4341 || DescNewLdt.Legacy.Gen.u1DescType
4342 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4343 {
4344 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4345 uNewLdt, DescNewLdt.Legacy.u));
4346 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4347 }
4348
4349 pCtx->ldtr.ValidSel = uNewLdt;
4350 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4351 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4352 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4353 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4354 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4355 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4356 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4357 }
4358
4359 IEMSELDESC DescSS;
4360 if (IEM_IS_V86_MODE(pVCpu))
4361 {
4362 pVCpu->iem.s.uCpl = 3;
4363 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4364 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4365 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4366 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4367 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4368 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4369
4370 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4371 DescSS.Legacy.u = 0;
4372 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4373 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4374 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4375 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4376 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4377 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4378 DescSS.Legacy.Gen.u2Dpl = 3;
4379 }
4380 else
4381 {
4382 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4383
4384 /*
4385 * Load the stack segment for the new task.
4386 */
4387 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4388 {
4389 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4390 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4391 }
4392
4393 /* Fetch the descriptor. */
4394 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4395 if (rcStrict != VINF_SUCCESS)
4396 {
4397 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4398 VBOXSTRICTRC_VAL(rcStrict)));
4399 return rcStrict;
4400 }
4401
4402 /* SS must be a data segment and writable. */
4403 if ( !DescSS.Legacy.Gen.u1DescType
4404 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4405 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4406 {
4407 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4408 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4409 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4410 }
4411
4412 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4413 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4414 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4415 {
4416 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4417 uNewCpl));
4418 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4419 }
4420
4421 /* Is it there? */
4422 if (!DescSS.Legacy.Gen.u1Present)
4423 {
4424 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4425 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4426 }
4427
4428 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4429 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4430
4431 /* Set the accessed bit before committing the result into SS. */
4432 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4433 {
4434 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4435 if (rcStrict != VINF_SUCCESS)
4436 return rcStrict;
4437 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4438 }
4439
4440 /* Commit SS. */
4441 pCtx->ss.Sel = uNewSS;
4442 pCtx->ss.ValidSel = uNewSS;
4443 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4444 pCtx->ss.u32Limit = cbLimit;
4445 pCtx->ss.u64Base = u64Base;
4446 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4447 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4448
4449 /* CPL has changed, update IEM before loading rest of segments. */
4450 pVCpu->iem.s.uCpl = uNewCpl;
4451
4452 /*
4453 * Load the data segments for the new task.
4454 */
4455 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4456 if (rcStrict != VINF_SUCCESS)
4457 return rcStrict;
4458 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4459 if (rcStrict != VINF_SUCCESS)
4460 return rcStrict;
4461 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4462 if (rcStrict != VINF_SUCCESS)
4463 return rcStrict;
4464 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4465 if (rcStrict != VINF_SUCCESS)
4466 return rcStrict;
4467
4468 /*
4469 * Load the code segment for the new task.
4470 */
4471 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4472 {
4473 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4474 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4475 }
4476
4477 /* Fetch the descriptor. */
4478 IEMSELDESC DescCS;
4479 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4480 if (rcStrict != VINF_SUCCESS)
4481 {
4482 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4483 return rcStrict;
4484 }
4485
4486 /* CS must be a code segment. */
4487 if ( !DescCS.Legacy.Gen.u1DescType
4488 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4489 {
4490 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4491 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4492 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4493 }
4494
4495 /* For conforming CS, DPL must be less than or equal to the RPL. */
4496 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4497 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4498 {
4499 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4500 DescCS.Legacy.Gen.u2Dpl));
4501 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4502 }
4503
4504 /* For non-conforming CS, DPL must match RPL. */
4505 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4506 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4507 {
4508 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4509 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4510 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4511 }
4512
4513 /* Is it there? */
4514 if (!DescCS.Legacy.Gen.u1Present)
4515 {
4516 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4517 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4518 }
4519
4520 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4521 u64Base = X86DESC_BASE(&DescCS.Legacy);
4522
4523 /* Set the accessed bit before committing the result into CS. */
4524 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4525 {
4526 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4527 if (rcStrict != VINF_SUCCESS)
4528 return rcStrict;
4529 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4530 }
4531
4532 /* Commit CS. */
4533 pCtx->cs.Sel = uNewCS;
4534 pCtx->cs.ValidSel = uNewCS;
4535 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4536 pCtx->cs.u32Limit = cbLimit;
4537 pCtx->cs.u64Base = u64Base;
4538 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4539 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4540 }
4541
4542 /** @todo Debug trap. */
4543 if (fIsNewTSS386 && fNewDebugTrap)
4544 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4545
4546 /*
4547 * Construct the error code masks based on what caused this task switch.
4548 * See Intel Instruction reference for INT.
4549 */
4550 uint16_t uExt;
4551 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4552 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4553 {
4554 uExt = 1;
4555 }
4556 else
4557 uExt = 0;
4558
4559 /*
4560 * Push any error code on to the new stack.
4561 */
4562 if (fFlags & IEM_XCPT_FLAGS_ERR)
4563 {
4564 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4565 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4566 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4567
4568 /* Check that there is sufficient space on the stack. */
4569 /** @todo Factor out segment limit checking for normal/expand down segments
4570 * into a separate function. */
4571 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4572 {
4573 if ( pCtx->esp - 1 > cbLimitSS
4574 || pCtx->esp < cbStackFrame)
4575 {
4576 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4577 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4578 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4579 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4580 }
4581 }
4582 else
4583 {
4584 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4585 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4586 {
4587 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4588 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4589 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4590 }
4591 }
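        /*
         * Worked example (sketch) for the checks above: with a normal (not
         * expand-down) SS of limit 0x1fff and cbStackFrame=4, the push is
         * accepted for ESP in [4..0x2000]; for an expand-down SS, ESP must be
         * at least cbLimitSS + 1 + cbStackFrame instead.
         */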
4592
4593
4594 if (fIsNewTSS386)
4595 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4596 else
4597 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4598 if (rcStrict != VINF_SUCCESS)
4599 {
4600 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4601 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4602 return rcStrict;
4603 }
4604 }
4605
4606 /* Check the new EIP against the new CS limit. */
4607 if (pCtx->eip > pCtx->cs.u32Limit)
4608 {
4609 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4610 pCtx->eip, pCtx->cs.u32Limit));
4611 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4612 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4613 }
4614
4615 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4616 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4617}
4618
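/*
 * Illustrative sketch (not part of the build): how the task-gate path of the
 * protected-mode dispatcher hands over to iemTaskSwitch above, assuming the
 * TSS descriptor (DescTSS) has already been fetched and validated, and that
 * the saved EIP follows the same rule as the real-mode return address.
 *
 *      uint32_t uNextEip = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
 *      return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT,
 *                           uNextEip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
 */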
4619
4620/**
4621 * Implements exceptions and interrupts for protected mode.
4622 *
4623 * @returns VBox strict status code.
4624 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4625 * @param pCtx The CPU context.
4626 * @param cbInstr The number of bytes to offset rIP by in the return
4627 * address.
4628 * @param u8Vector The interrupt / exception vector number.
4629 * @param fFlags The flags.
4630 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4631 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4632 */
4633IEM_STATIC VBOXSTRICTRC
4634iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4635 PCPUMCTX pCtx,
4636 uint8_t cbInstr,
4637 uint8_t u8Vector,
4638 uint32_t fFlags,
4639 uint16_t uErr,
4640 uint64_t uCr2)
4641{
4642 /*
4643 * Read the IDT entry.
4644 */
4645 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4646 {
4647 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4648 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4649 }
4650 X86DESC Idte;
4651 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4652 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4653 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4654 return rcStrict;
4655 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4656 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4657 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4658
4659 /*
4660 * Check the descriptor type, DPL and such.
4661 * ASSUMES this is done in the same order as described for call-gate calls.
4662 */
4663 if (Idte.Gate.u1DescType)
4664 {
4665 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4666 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4667 }
4668 bool fTaskGate = false;
4669 uint8_t f32BitGate = true;
4670 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4671 switch (Idte.Gate.u4Type)
4672 {
4673 case X86_SEL_TYPE_SYS_UNDEFINED:
4674 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4675 case X86_SEL_TYPE_SYS_LDT:
4676 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4677 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4678 case X86_SEL_TYPE_SYS_UNDEFINED2:
4679 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4680 case X86_SEL_TYPE_SYS_UNDEFINED3:
4681 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4682 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4683 case X86_SEL_TYPE_SYS_UNDEFINED4:
4684 {
4685 /** @todo check what actually happens when the type is wrong...
4686 * esp. call gates. */
4687 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4688 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4689 }
4690
4691 case X86_SEL_TYPE_SYS_286_INT_GATE:
4692 f32BitGate = false;
4693 RT_FALL_THRU();
4694 case X86_SEL_TYPE_SYS_386_INT_GATE:
4695 fEflToClear |= X86_EFL_IF;
4696 break;
4697
4698 case X86_SEL_TYPE_SYS_TASK_GATE:
4699 fTaskGate = true;
4700#ifndef IEM_IMPLEMENTS_TASKSWITCH
4701 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4702#endif
4703 break;
4704
4705 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4706 f32BitGate = false;
             RT_FALL_THRU();
4707 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4708 break;
4709
4710 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4711 }
4712
4713 /* Check DPL against CPL if applicable. */
4714 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4715 {
4716 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4717 {
4718 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4719 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4720 }
4721 }
4722
4723 /* Is it there? */
4724 if (!Idte.Gate.u1Present)
4725 {
4726 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4727 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4728 }
4729
4730 /* Is it a task-gate? */
4731 if (fTaskGate)
4732 {
4733 /*
4734 * Construct the error code masks based on what caused this task switch.
4735 * See Intel Instruction reference for INT.
4736 */
4737 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4738 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4739 RTSEL SelTSS = Idte.Gate.u16Sel;
4740
4741 /*
4742 * Fetch the TSS descriptor in the GDT.
4743 */
4744 IEMSELDESC DescTSS;
4745 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4746 if (rcStrict != VINF_SUCCESS)
4747 {
4748 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4749 VBOXSTRICTRC_VAL(rcStrict)));
4750 return rcStrict;
4751 }
4752
4753 /* The TSS descriptor must be a system segment and be available (not busy). */
4754 if ( DescTSS.Legacy.Gen.u1DescType
4755 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4756 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4757 {
4758 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4759 u8Vector, SelTSS, DescTSS.Legacy.au64));
4760 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4761 }
4762
4763 /* The TSS must be present. */
4764 if (!DescTSS.Legacy.Gen.u1Present)
4765 {
4766 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4767 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4768 }
4769
4770 /* Do the actual task switch. */
4771 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4772 }
4773
4774 /* A null CS is bad. */
4775 RTSEL NewCS = Idte.Gate.u16Sel;
4776 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4777 {
4778 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4779 return iemRaiseGeneralProtectionFault0(pVCpu);
4780 }
4781
4782 /* Fetch the descriptor for the new CS. */
4783 IEMSELDESC DescCS;
4784 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4785 if (rcStrict != VINF_SUCCESS)
4786 {
4787 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4788 return rcStrict;
4789 }
4790
4791 /* Must be a code segment. */
4792 if (!DescCS.Legacy.Gen.u1DescType)
4793 {
4794 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4795 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4796 }
4797 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4798 {
4799 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4800 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4801 }
4802
4803 /* Don't allow lowering the privilege level. */
4804 /** @todo Does the lowering of privileges apply to software interrupts
4805 * only? This has bearings on the more-privileged or
4806 * same-privilege stack behavior further down. A testcase would
4807 * be nice. */
4808 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4809 {
4810 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4811 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4812 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4813 }
4814
4815 /* Make sure the selector is present. */
4816 if (!DescCS.Legacy.Gen.u1Present)
4817 {
4818 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4819 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4820 }
4821
4822 /* Check the new EIP against the new CS limit. */
4823 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4824 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4825 ? Idte.Gate.u16OffsetLow
4826 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4827 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4828 if (uNewEip > cbLimitCS)
4829 {
4830 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4831 u8Vector, uNewEip, cbLimitCS, NewCS));
4832 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4833 }
4834 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4835
4836 /* Calc the flag image to push. */
4837 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4838 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4839 fEfl &= ~X86_EFL_RF;
4840 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4841 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4842
4843 /* From V8086 mode only go to CPL 0. */
4844 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4845 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4846 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4847 {
4848 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4849 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4850 }
4851
4852 /*
4853 * If the privilege level changes, we need to get a new stack from the TSS.
4854 * This in turns means validating the new SS and ESP...
4855 */
4856 if (uNewCpl != pVCpu->iem.s.uCpl)
4857 {
4858 RTSEL NewSS;
4859 uint32_t uNewEsp;
4860 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4861 if (rcStrict != VINF_SUCCESS)
4862 return rcStrict;
4863
4864 IEMSELDESC DescSS;
4865 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4866 if (rcStrict != VINF_SUCCESS)
4867 return rcStrict;
4868 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4869 if (!DescSS.Legacy.Gen.u1DefBig)
4870 {
4871 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4872 uNewEsp = (uint16_t)uNewEsp;
4873 }
4874
4875 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4876
4877 /* Check that there is sufficient space for the stack frame. */
4878 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
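            /* Frame size: EIP, CS, EFLAGS, old ESP and old SS make 5 entries (6 with an
               error code); interrupting V86 code additionally pushes ES, DS, FS and GS,
               giving 9 (10) entries.  Each entry is 2 bytes for a 16-bit gate, and the
               shift by f32BitGate doubles that for a 32-bit gate. */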
4879 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4880 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4881 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4882
4883 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4884 {
4885 if ( uNewEsp - 1 > cbLimitSS
4886 || uNewEsp < cbStackFrame)
4887 {
4888 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4889 u8Vector, NewSS, uNewEsp, cbStackFrame));
4890 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4891 }
4892 }
4893 else
4894 {
4895 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4896 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4897 {
4898 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4899 u8Vector, NewSS, uNewEsp, cbStackFrame));
4900 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4901 }
4902 }
4903
4904 /*
4905 * Start making changes.
4906 */
4907
4908 /* Set the new CPL so that stack accesses use it. */
4909 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4910 pVCpu->iem.s.uCpl = uNewCpl;
4911
4912 /* Create the stack frame. */
4913 RTPTRUNION uStackFrame;
4914 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4915 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4916 if (rcStrict != VINF_SUCCESS)
4917 return rcStrict;
4918 void * const pvStackFrame = uStackFrame.pv;
4919 if (f32BitGate)
4920 {
4921 if (fFlags & IEM_XCPT_FLAGS_ERR)
4922 *uStackFrame.pu32++ = uErr;
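                /* The error code, when present, ends up lowest on the stack; advancing the
                   pointer above lets the fixed indices below lay out EIP, CS, EFLAGS, ESP,
                   SS (and the V86 data selectors) the same way in both cases. */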
4923 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4924 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4925 uStackFrame.pu32[2] = fEfl;
4926 uStackFrame.pu32[3] = pCtx->esp;
4927 uStackFrame.pu32[4] = pCtx->ss.Sel;
4928 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
4929 if (fEfl & X86_EFL_VM)
4930 {
4931 uStackFrame.pu32[1] = pCtx->cs.Sel;
4932 uStackFrame.pu32[5] = pCtx->es.Sel;
4933 uStackFrame.pu32[6] = pCtx->ds.Sel;
4934 uStackFrame.pu32[7] = pCtx->fs.Sel;
4935 uStackFrame.pu32[8] = pCtx->gs.Sel;
4936 }
4937 }
4938 else
4939 {
4940 if (fFlags & IEM_XCPT_FLAGS_ERR)
4941 *uStackFrame.pu16++ = uErr;
4942 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4943 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4944 uStackFrame.pu16[2] = fEfl;
4945 uStackFrame.pu16[3] = pCtx->sp;
4946 uStackFrame.pu16[4] = pCtx->ss.Sel;
4947 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
4948 if (fEfl & X86_EFL_VM)
4949 {
4950 uStackFrame.pu16[1] = pCtx->cs.Sel;
4951 uStackFrame.pu16[5] = pCtx->es.Sel;
4952 uStackFrame.pu16[6] = pCtx->ds.Sel;
4953 uStackFrame.pu16[7] = pCtx->fs.Sel;
4954 uStackFrame.pu16[8] = pCtx->gs.Sel;
4955 }
4956 }
4957 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4958 if (rcStrict != VINF_SUCCESS)
4959 return rcStrict;
4960
4961 /* Mark the selectors 'accessed' (hope this is the correct time). */
4962 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4963 * after pushing the stack frame? (Write protect the gdt + stack to
4964 * find out.) */
4965 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4966 {
4967 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4968 if (rcStrict != VINF_SUCCESS)
4969 return rcStrict;
4970 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4971 }
4972
4973 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4974 {
4975 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
4976 if (rcStrict != VINF_SUCCESS)
4977 return rcStrict;
4978 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4979 }
4980
4981 /*
4982 * Start committing the register changes (joins with the DPL=CPL branch).
4983 */
4984 pCtx->ss.Sel = NewSS;
4985 pCtx->ss.ValidSel = NewSS;
4986 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4987 pCtx->ss.u32Limit = cbLimitSS;
4988 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
4989 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4990 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
4991 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
4992 * SP is loaded).
4993 * Need to check the other combinations too:
4994 * - 16-bit TSS, 32-bit handler
4995 * - 32-bit TSS, 16-bit handler */
4996 if (!pCtx->ss.Attr.n.u1DefBig)
4997 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
4998 else
4999 pCtx->rsp = uNewEsp - cbStackFrame;
5000
5001 if (fEfl & X86_EFL_VM)
5002 {
5003 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
5004 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
5005 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
5006 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
5007 }
5008 }
5009 /*
5010 * Same privilege, no stack change and smaller stack frame.
5011 */
5012 else
5013 {
5014 uint64_t uNewRsp;
5015 RTPTRUNION uStackFrame;
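            /* Same-privilege frame: EIP, CS and EFLAGS only, plus an optional error code,
               i.e. 3 or 4 entries of 2 bytes each, doubled by the shift for a 32-bit gate. */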
5016 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
5017 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5018 if (rcStrict != VINF_SUCCESS)
5019 return rcStrict;
5020 void * const pvStackFrame = uStackFrame.pv;
5021
5022 if (f32BitGate)
5023 {
5024 if (fFlags & IEM_XCPT_FLAGS_ERR)
5025 *uStackFrame.pu32++ = uErr;
5026 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5027 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5028 uStackFrame.pu32[2] = fEfl;
5029 }
5030 else
5031 {
5032 if (fFlags & IEM_XCPT_FLAGS_ERR)
5033 *uStackFrame.pu16++ = uErr;
5034 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5035 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5036 uStackFrame.pu16[2] = fEfl;
5037 }
5038 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5039 if (rcStrict != VINF_SUCCESS)
5040 return rcStrict;
5041
5042 /* Mark the CS selector as 'accessed'. */
5043 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5044 {
5045 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5046 if (rcStrict != VINF_SUCCESS)
5047 return rcStrict;
5048 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5049 }
5050
5051 /*
5052 * Start committing the register changes (joins with the other branch).
5053 */
5054 pCtx->rsp = uNewRsp;
5055 }
5056
5057 /* ... register committing continues. */
5058 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5059 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5060 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5061 pCtx->cs.u32Limit = cbLimitCS;
5062 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5063 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5064
5065 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5066 fEfl &= ~fEflToClear;
5067 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5068
5069 if (fFlags & IEM_XCPT_FLAGS_CR2)
5070 pCtx->cr2 = uCr2;
5071
5072 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5073 iemRaiseXcptAdjustState(pCtx, u8Vector);
5074
5075 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5076}
5077
5078
5079/**
5080 * Implements exceptions and interrupts for long mode.
5081 *
5082 * @returns VBox strict status code.
5083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5084 * @param pCtx The CPU context.
5085 * @param cbInstr The number of bytes to offset rIP by in the return
5086 * address.
5087 * @param u8Vector The interrupt / exception vector number.
5088 * @param fFlags The flags.
5089 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5090 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5091 */
5092IEM_STATIC VBOXSTRICTRC
5093iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5094 PCPUMCTX pCtx,
5095 uint8_t cbInstr,
5096 uint8_t u8Vector,
5097 uint32_t fFlags,
5098 uint16_t uErr,
5099 uint64_t uCr2)
5100{
5101 /*
5102 * Read the IDT entry.
5103 */
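    /* Long-mode IDT entries are 16 bytes, hence the shift by 4 and the two 8-byte fetches below. */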
5104 uint16_t offIdt = (uint16_t)u8Vector << 4;
5105 if (pCtx->idtr.cbIdt < offIdt + 7)
5106 {
5107 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
5108 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5109 }
5110 X86DESC64 Idte;
5111 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
5112 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5113 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
5114 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5115 return rcStrict;
5116 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5117 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5118 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5119
5120 /*
5121 * Check the descriptor type, DPL and such.
5122 * ASSUMES this is done in the same order as described for call-gate calls.
5123 */
5124 if (Idte.Gate.u1DescType)
5125 {
5126 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5127 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5128 }
5129 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5130 switch (Idte.Gate.u4Type)
5131 {
5132 case AMD64_SEL_TYPE_SYS_INT_GATE:
5133 fEflToClear |= X86_EFL_IF;
5134 break;
5135 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5136 break;
5137
5138 default:
5139 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5140 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5141 }
5142
5143 /* Check DPL against CPL if applicable. */
5144 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5145 {
5146 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5147 {
5148 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5149 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5150 }
5151 }
5152
5153 /* Is it there? */
5154 if (!Idte.Gate.u1Present)
5155 {
5156 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5157 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5158 }
5159
5160 /* A null CS is bad. */
5161 RTSEL NewCS = Idte.Gate.u16Sel;
5162 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5163 {
5164 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5165 return iemRaiseGeneralProtectionFault0(pVCpu);
5166 }
5167
5168 /* Fetch the descriptor for the new CS. */
5169 IEMSELDESC DescCS;
5170 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5171 if (rcStrict != VINF_SUCCESS)
5172 {
5173 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5174 return rcStrict;
5175 }
5176
5177 /* Must be a 64-bit code segment. */
5178 if (!DescCS.Long.Gen.u1DescType)
5179 {
5180 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5181 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5182 }
5183 if ( !DescCS.Long.Gen.u1Long
5184 || DescCS.Long.Gen.u1DefBig
5185 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5186 {
5187 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5188 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5189 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5190 }
5191
5192 /* Don't allow lowering the privilege level. For non-conforming CS
5193 selectors, the CS.DPL sets the privilege level the trap/interrupt
5194 handler runs at. For conforming CS selectors, the CPL remains
5195 unchanged, but the CS.DPL must be <= CPL. */
5196 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5197 * when CPU in Ring-0. Result \#GP? */
5198 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5199 {
5200 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5201 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5202 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5203 }
5204
5205
5206 /* Make sure the selector is present. */
5207 if (!DescCS.Legacy.Gen.u1Present)
5208 {
5209 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5210 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5211 }
5212
5213 /* Check that the new RIP is canonical. */
5214 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5215 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5216 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5217 if (!IEM_IS_CANONICAL(uNewRip))
5218 {
5219 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5220 return iemRaiseGeneralProtectionFault0(pVCpu);
5221 }
5222
5223 /*
5224 * If the privilege level changes or if the IST isn't zero, we need to get
5225 * a new stack from the TSS.
5226 */
5227 uint64_t uNewRsp;
5228 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5229 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5230 if ( uNewCpl != pVCpu->iem.s.uCpl
5231 || Idte.Gate.u3IST != 0)
5232 {
5233 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5234 if (rcStrict != VINF_SUCCESS)
5235 return rcStrict;
5236 }
5237 else
5238 uNewRsp = pCtx->rsp;
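    /* In 64-bit mode the CPU aligns the stack pointer down to a 16-byte boundary
       before pushing the interrupt stack frame. */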
5239 uNewRsp &= ~(uint64_t)0xf;
5240
5241 /*
5242 * Calc the flag image to push.
5243 */
5244 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
5245 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5246 fEfl &= ~X86_EFL_RF;
5247 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
5248 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5249
5250 /*
5251 * Start making changes.
5252 */
5253 /* Set the new CPL so that stack accesses use it. */
5254 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5255 pVCpu->iem.s.uCpl = uNewCpl;
5256
5257 /* Create the stack frame. */
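    /* The 64-bit frame always consists of SS, RSP, RFLAGS, CS and RIP (5 qwords),
       plus the error code when applicable. */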
5258 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
5259 RTPTRUNION uStackFrame;
5260 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5261 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5262 if (rcStrict != VINF_SUCCESS)
5263 return rcStrict;
5264 void * const pvStackFrame = uStackFrame.pv;
5265
5266 if (fFlags & IEM_XCPT_FLAGS_ERR)
5267 *uStackFrame.pu64++ = uErr;
5268 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
5269 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5270 uStackFrame.pu64[2] = fEfl;
5271 uStackFrame.pu64[3] = pCtx->rsp;
5272 uStackFrame.pu64[4] = pCtx->ss.Sel;
5273 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5274 if (rcStrict != VINF_SUCCESS)
5275 return rcStrict;
5276
5277 /* Mark the CS selector 'accessed' (hope this is the correct time). */
5278 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5279 * after pushing the stack frame? (Write protect the gdt + stack to
5280 * find out.) */
5281 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5282 {
5283 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5284 if (rcStrict != VINF_SUCCESS)
5285 return rcStrict;
5286 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5287 }
5288
5289 /*
5290 * Start committing the register changes.
5291 */
5292 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5293 * hidden registers when interrupting 32-bit or 16-bit code! */
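    /* On an inter-privilege interrupt in long mode, SS is loaded with a NULL selector
       whose RPL equals the new CPL; base and limit are not used in 64-bit mode. */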
5294 if (uNewCpl != uOldCpl)
5295 {
5296 pCtx->ss.Sel = 0 | uNewCpl;
5297 pCtx->ss.ValidSel = 0 | uNewCpl;
5298 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5299 pCtx->ss.u32Limit = UINT32_MAX;
5300 pCtx->ss.u64Base = 0;
5301 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5302 }
5303 pCtx->rsp = uNewRsp - cbStackFrame;
5304 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5305 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5306 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5307 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5308 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5309 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5310 pCtx->rip = uNewRip;
5311
5312 fEfl &= ~fEflToClear;
5313 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5314
5315 if (fFlags & IEM_XCPT_FLAGS_CR2)
5316 pCtx->cr2 = uCr2;
5317
5318 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5319 iemRaiseXcptAdjustState(pCtx, u8Vector);
5320
5321 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5322}
5323
5324
5325/**
5326 * Implements exceptions and interrupts.
5327 *
5328 * All exceptions and interrupts go through this function!
5329 *
5330 * @returns VBox strict status code.
5331 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5332 * @param cbInstr The number of bytes to offset rIP by in the return
5333 * address.
5334 * @param u8Vector The interrupt / exception vector number.
5335 * @param fFlags The flags.
5336 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5337 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5338 */
5339DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5340iemRaiseXcptOrInt(PVMCPU pVCpu,
5341 uint8_t cbInstr,
5342 uint8_t u8Vector,
5343 uint32_t fFlags,
5344 uint16_t uErr,
5345 uint64_t uCr2)
5346{
5347 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5348#ifdef IN_RING0
5349 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5350 AssertRCReturn(rc, rc);
5351#endif
5352
5353#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5354 /*
5355 * Flush prefetch buffer
5356 */
5357 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5358#endif
5359
5360 /*
5361 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5362 */
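    /* INT n is IOPL-sensitive in V86 mode: with IOPL < 3 the instruction raises #GP(0)
       instead of vectoring through the IDT, which is what the check below implements. */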
5363 if ( pCtx->eflags.Bits.u1VM
5364 && pCtx->eflags.Bits.u2IOPL != 3
5365 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5366 && (pCtx->cr0 & X86_CR0_PE) )
5367 {
5368 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5369 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5370 u8Vector = X86_XCPT_GP;
5371 uErr = 0;
5372 }
5373#ifdef DBGFTRACE_ENABLED
5374 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5375 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5376 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5377#endif
5378
5379#ifdef VBOX_WITH_NESTED_HWVIRT
5380 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
5381 {
5382 /*
5383 * If the event is being injected as part of VMRUN, it isn't subject to event
5384 * intercepts in the nested-guest. However, secondary exceptions that occur
5385 * during injection of any event -are- subject to exception intercepts.
5386 * See AMD spec. 15.20 "Event Injection".
5387 */
5388 if (!pCtx->hwvirt.svm.fInterceptEvents)
5389 pCtx->hwvirt.svm.fInterceptEvents = 1;
5390 else
5391 {
5392 /*
5393 * Check and handle if the event being raised is intercepted.
5394 */
5395 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, pCtx, u8Vector, fFlags, uErr, uCr2);
5396 if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
5397 return rcStrict0;
5398 }
5399 }
5400#endif /* VBOX_WITH_NESTED_HWVIRT */
5401
5402 /*
5403 * Do recursion accounting.
5404 */
5405 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5406 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5407 if (pVCpu->iem.s.cXcptRecursions == 0)
5408 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5409 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5410 else
5411 {
5412 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5413 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5414 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5415
5416 if (pVCpu->iem.s.cXcptRecursions >= 3)
5417 {
5418#ifdef DEBUG_bird
5419 AssertFailed();
5420#endif
5421 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5422 }
5423
5424 /*
5425 * Evaluate the sequence of recurring events.
5426 */
5427 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5428 NULL /* pXcptRaiseInfo */);
5429 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5430 { /* likely */ }
5431 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5432 {
5433 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5434 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5435 u8Vector = X86_XCPT_DF;
5436 uErr = 0;
5437 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5438 if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5439 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + X86_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5440 }
5441 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5442 {
5443 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5444 return iemInitiateCpuShutdown(pVCpu);
5445 }
5446 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5447 {
5448 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5449 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5450 if (!CPUMIsGuestInNestedHwVirtMode(pCtx))
5451 return VERR_EM_GUEST_CPU_HANG;
5452 }
5453 else
5454 {
5455 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5456 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5457 return VERR_IEM_IPE_9;
5458 }
5459
5460 /*
5461          * The 'EXT' bit is set when an exception occurs during delivery of an external
5462          * event (such as an interrupt or an earlier exception)[1]. The privileged software
5463          * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5464          * interrupts (INT n, INTO, INT3), the 'EXT' bit will not be set[3].
5465 *
5466 * [1] - Intel spec. 6.13 "Error Code"
5467 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5468 * [3] - Intel Instruction reference for INT n.
5469 */
5470 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5471 && (fFlags & IEM_XCPT_FLAGS_ERR)
5472 && u8Vector != X86_XCPT_PF
5473 && u8Vector != X86_XCPT_DF)
5474 {
5475 uErr |= X86_TRAP_ERR_EXTERNAL;
5476 }
5477 }
5478
5479 pVCpu->iem.s.cXcptRecursions++;
5480 pVCpu->iem.s.uCurXcpt = u8Vector;
5481 pVCpu->iem.s.fCurXcpt = fFlags;
5482 pVCpu->iem.s.uCurXcptErr = uErr;
5483 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5484
5485 /*
5486 * Extensive logging.
5487 */
5488#if defined(LOG_ENABLED) && defined(IN_RING3)
5489 if (LogIs3Enabled())
5490 {
5491 PVM pVM = pVCpu->CTX_SUFF(pVM);
5492 char szRegs[4096];
5493 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5494 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5495 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5496 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5497 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5498 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5499 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5500 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5501 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5502 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5503 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5504 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5505 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5506 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5507 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5508 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5509 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5510 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5511 " efer=%016VR{efer}\n"
5512 " pat=%016VR{pat}\n"
5513 " sf_mask=%016VR{sf_mask}\n"
5514 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5515 " lstar=%016VR{lstar}\n"
5516 " star=%016VR{star} cstar=%016VR{cstar}\n"
5517 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5518 );
5519
5520 char szInstr[256];
5521 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5522 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5523 szInstr, sizeof(szInstr), NULL);
5524 Log3(("%s%s\n", szRegs, szInstr));
5525 }
5526#endif /* LOG_ENABLED */
5527
5528 /*
5529 * Call the mode specific worker function.
5530 */
5531 VBOXSTRICTRC rcStrict;
5532 if (!(pCtx->cr0 & X86_CR0_PE))
5533 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5534 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5535 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5536 else
5537 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5538
5539 /* Flush the prefetch buffer. */
5540#ifdef IEM_WITH_CODE_TLB
5541 pVCpu->iem.s.pbInstrBuf = NULL;
5542#else
5543 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5544#endif
5545
5546 /*
5547 * Unwind.
5548 */
5549 pVCpu->iem.s.cXcptRecursions--;
5550 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5551 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5552 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5553 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5554 return rcStrict;
5555}
5556
5557#ifdef IEM_WITH_SETJMP
5558/**
5559 * See iemRaiseXcptOrInt. Will not return.
5560 */
5561IEM_STATIC DECL_NO_RETURN(void)
5562iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5563 uint8_t cbInstr,
5564 uint8_t u8Vector,
5565 uint32_t fFlags,
5566 uint16_t uErr,
5567 uint64_t uCr2)
5568{
5569 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5570 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5571}
5572#endif
5573
5574
5575/** \#DE - 00. */
5576DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5577{
5578 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5579}
5580
5581
5582/** \#DB - 01.
5583 * @note This automatically clears DR7.GD, as the CPU does when delivering \#DB, so the handler can access the debug registers. */
5584DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5585{
5586 /** @todo set/clear RF. */
5587 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5588 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5589}
5590
5591
5592/** \#BR - 05. */
5593DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5594{
5595 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5596}
5597
5598
5599/** \#UD - 06. */
5600DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5601{
5602 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5603}
5604
5605
5606/** \#NM - 07. */
5607DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5608{
5609 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5610}
5611
5612
5613/** \#TS(err) - 0a. */
5614DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5615{
5616 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5617}
5618
5619
5620/** \#TS(tr) - 0a. */
5621DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5622{
5623 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5624 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5625}
5626
5627
5628/** \#TS(0) - 0a. */
5629DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5630{
5631 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5632 0, 0);
5633}
5634
5635
5636/** \#TS(err) - 0a. */
5637DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5638{
5639 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5640 uSel & X86_SEL_MASK_OFF_RPL, 0);
5641}
5642
5643
5644/** \#NP(err) - 0b. */
5645DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5646{
5647 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5648}
5649
5650
5651/** \#NP(sel) - 0b. */
5652DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5653{
5654 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5655 uSel & ~X86_SEL_RPL, 0);
5656}
5657
5658
5659/** \#SS(seg) - 0c. */
5660DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5661{
5662 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5663 uSel & ~X86_SEL_RPL, 0);
5664}
5665
5666
5667/** \#SS(err) - 0c. */
5668DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5669{
5670 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5671}
5672
5673
5674/** \#GP(n) - 0d. */
5675DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5676{
5677 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5678}
5679
5680
5681/** \#GP(0) - 0d. */
5682DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5683{
5684 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5685}
5686
5687#ifdef IEM_WITH_SETJMP
5688/** \#GP(0) - 0d. */
5689DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5690{
5691 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5692}
5693#endif
5694
5695
5696/** \#GP(sel) - 0d. */
5697DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5698{
5699 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5700 Sel & ~X86_SEL_RPL, 0);
5701}
5702
5703
5704/** \#GP(0) - 0d. */
5705DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5706{
5707 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5708}
5709
5710
5711/** \#GP(sel) - 0d. */
5712DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5713{
5714 NOREF(iSegReg); NOREF(fAccess);
5715 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5716 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5717}
5718
5719#ifdef IEM_WITH_SETJMP
5720/** \#GP(sel) - 0d, longjmp. */
5721DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5722{
5723 NOREF(iSegReg); NOREF(fAccess);
5724 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5725 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5726}
5727#endif
5728
5729/** \#GP(sel) - 0d. */
5730DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5731{
5732 NOREF(Sel);
5733 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5734}
5735
5736#ifdef IEM_WITH_SETJMP
5737/** \#GP(sel) - 0d, longjmp. */
5738DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5739{
5740 NOREF(Sel);
5741 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5742}
5743#endif
5744
5745
5746/** \#GP(sel) - 0d. */
5747DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5748{
5749 NOREF(iSegReg); NOREF(fAccess);
5750 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5751}
5752
5753#ifdef IEM_WITH_SETJMP
5754/** \#GP(sel) - 0d, longjmp. */
5755DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5756 uint32_t fAccess)
5757{
5758 NOREF(iSegReg); NOREF(fAccess);
5759 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5760}
5761#endif
5762
5763
5764/** \#PF(n) - 0e. */
5765DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5766{
5767 uint16_t uErr;
5768 switch (rc)
5769 {
5770 case VERR_PAGE_NOT_PRESENT:
5771 case VERR_PAGE_TABLE_NOT_PRESENT:
5772 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5773 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5774 uErr = 0;
5775 break;
5776
5777 default:
5778 AssertMsgFailed(("%Rrc\n", rc));
5779 RT_FALL_THRU();
5780 case VERR_ACCESS_DENIED:
5781 uErr = X86_TRAP_PF_P;
5782 break;
5783
5784 /** @todo reserved */
5785 }
5786
5787 if (pVCpu->iem.s.uCpl == 3)
5788 uErr |= X86_TRAP_PF_US;
5789
5790 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5791 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5792 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5793 uErr |= X86_TRAP_PF_ID;
5794
5795#if 0 /* This is so much non-sense, really. Why was it done like that? */
5796 /* Note! RW access callers reporting a WRITE protection fault, will clear
5797 the READ flag before calling. So, read-modify-write accesses (RW)
5798 can safely be reported as READ faults. */
5799 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5800 uErr |= X86_TRAP_PF_RW;
5801#else
5802 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5803 {
5804 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5805 uErr |= X86_TRAP_PF_RW;
5806 }
5807#endif
5808
5809 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5810 uErr, GCPtrWhere);
5811}
5812
5813#ifdef IEM_WITH_SETJMP
5814/** \#PF(n) - 0e, longjmp. */
5815IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5816{
5817 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5818}
5819#endif
5820
5821
5822/** \#MF(0) - 10. */
5823DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5824{
5825 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5826}
5827
5828
5829/** \#AC(0) - 11. */
5830DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5831{
5832 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5833}
5834
5835
5836/**
5837 * Macro for calling iemCImplRaiseDivideError().
5838 *
5839 * This enables us to add/remove arguments and force different levels of
5840 * inlining as we wish.
5841 *
5842 * @return Strict VBox status code.
5843 */
5844#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
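/* Typical use from an opcode decoder function (illustrative):
       return IEMOP_RAISE_DIVIDE_ERROR();
   which defers the actual raising to the C implementation below. */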
5845IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5846{
5847 NOREF(cbInstr);
5848 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5849}
5850
5851
5852/**
5853 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5854 *
5855 * This enables us to add/remove arguments and force different levels of
5856 * inlining as we wish.
5857 *
5858 * @return Strict VBox status code.
5859 */
5860#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5861IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5862{
5863 NOREF(cbInstr);
5864 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5865}
5866
5867
5868/**
5869 * Macro for calling iemCImplRaiseInvalidOpcode().
5870 *
5871 * This enables us to add/remove arguments and force different levels of
5872 * inlining as we wish.
5873 *
5874 * @return Strict VBox status code.
5875 */
5876#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5877IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5878{
5879 NOREF(cbInstr);
5880 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5881}
5882
5883
5884/** @} */
5885
5886
5887/*
5888 *
5889 * Helper routines.
5890 * Helper routines.
5891 * Helper routines.
5892 *
5893 */
5894
5895/**
5896 * Recalculates the effective operand size.
5897 *
5898 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5899 */
5900IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5901{
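    /* The 0x66 operand-size prefix toggles between 16-bit and 32-bit operand size;
       in 64-bit mode REX.W selects 64-bit and takes precedence over 0x66 (see the
       combined case below). */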
5902 switch (pVCpu->iem.s.enmCpuMode)
5903 {
5904 case IEMMODE_16BIT:
5905 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5906 break;
5907 case IEMMODE_32BIT:
5908 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5909 break;
5910 case IEMMODE_64BIT:
5911 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5912 {
5913 case 0:
5914 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5915 break;
5916 case IEM_OP_PRF_SIZE_OP:
5917 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5918 break;
5919 case IEM_OP_PRF_SIZE_REX_W:
5920 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5921 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5922 break;
5923 }
5924 break;
5925 default:
5926 AssertFailed();
5927 }
5928}
5929
5930
5931/**
5932 * Sets the default operand size to 64-bit and recalculates the effective
5933 * operand size.
5934 *
5935 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5936 */
5937IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5938{
5939 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5940 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
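    /* For instructions defaulting to 64-bit operand size, only a 0x66 prefix without
       REX.W drops the operand size to 16-bit; a 32-bit operand size is not encodable. */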
5941 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5942 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5943 else
5944 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5945}
5946
5947
5948/*
5949 *
5950 * Common opcode decoders.
5951 * Common opcode decoders.
5952 * Common opcode decoders.
5953 *
5954 */
5955//#include <iprt/mem.h>
5956
5957/**
5958 * Used to add extra details about a stub case.
5959 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5960 */
5961IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
5962{
5963#if defined(LOG_ENABLED) && defined(IN_RING3)
5964 PVM pVM = pVCpu->CTX_SUFF(pVM);
5965 char szRegs[4096];
5966 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5967 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5968 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5969 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5970 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5971 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5972 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5973 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5974 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5975 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5976 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5977 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5978 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5979 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5980 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5981 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5982 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5983 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5984 " efer=%016VR{efer}\n"
5985 " pat=%016VR{pat}\n"
5986 " sf_mask=%016VR{sf_mask}\n"
5987 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5988 " lstar=%016VR{lstar}\n"
5989 " star=%016VR{star} cstar=%016VR{cstar}\n"
5990 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5991 );
5992
5993 char szInstr[256];
5994 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5995 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5996 szInstr, sizeof(szInstr), NULL);
5997
5998 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
5999#else
6000 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip);
6001#endif
6002}
6003
6004/**
6005 * Complains about a stub.
6006 *
6007 * Providing two versions of this macro, one for daily use and one for use when
6008 * working on IEM.
6009 */
6010#if 0
6011# define IEMOP_BITCH_ABOUT_STUB() \
6012 do { \
6013 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6014 iemOpStubMsg2(pVCpu); \
6015 RTAssertPanic(); \
6016 } while (0)
6017#else
6018# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6019#endif
6020
6021/** Stubs an opcode. */
6022#define FNIEMOP_STUB(a_Name) \
6023 FNIEMOP_DEF(a_Name) \
6024 { \
6025 RT_NOREF_PV(pVCpu); \
6026 IEMOP_BITCH_ABOUT_STUB(); \
6027 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6028 } \
6029 typedef int ignore_semicolon
6030
6031/** Stubs an opcode. */
6032#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6033 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6034 { \
6035 RT_NOREF_PV(pVCpu); \
6036 RT_NOREF_PV(a_Name0); \
6037 IEMOP_BITCH_ABOUT_STUB(); \
6038 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6039 } \
6040 typedef int ignore_semicolon
6041
6042/** Stubs an opcode which currently should raise \#UD. */
6043#define FNIEMOP_UD_STUB(a_Name) \
6044 FNIEMOP_DEF(a_Name) \
6045 { \
6046 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6047 return IEMOP_RAISE_INVALID_OPCODE(); \
6048 } \
6049 typedef int ignore_semicolon
6050
6051/** Stubs an opcode which currently should raise \#UD. */
6052#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6053 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6054 { \
6055 RT_NOREF_PV(pVCpu); \
6056 RT_NOREF_PV(a_Name0); \
6057 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6058 return IEMOP_RAISE_INVALID_OPCODE(); \
6059 } \
6060 typedef int ignore_semicolon
6061
6062
6063
6064/** @name Register Access.
6065 * @{
6066 */
6067
6068/**
6069 * Gets a reference (pointer) to the specified hidden segment register.
6070 *
6071 * @returns Hidden register reference.
6072 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6073 * @param iSegReg The segment register.
6074 */
6075IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6076{
6077 Assert(iSegReg < X86_SREG_COUNT);
6078 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6079 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
6080
6081#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6082 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6083 { /* likely */ }
6084 else
6085 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6086#else
6087 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6088#endif
6089 return pSReg;
6090}
6091
6092
6093/**
6094 * Ensures that the given hidden segment register is up to date.
6095 *
6096 * @returns Hidden register reference.
6097 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6098 * @param pSReg The segment register.
6099 */
6100IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6101{
6102#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6103 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6104 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6105#else
6106 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6107 NOREF(pVCpu);
6108#endif
6109 return pSReg;
6110}
6111
6112
6113/**
6114 * Gets a reference (pointer) to the specified segment register (the selector
6115 * value).
6116 *
6117 * @returns Pointer to the selector variable.
6118 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6119 * @param iSegReg The segment register.
6120 */
6121DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6122{
6123 Assert(iSegReg < X86_SREG_COUNT);
6124 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6125 return &pCtx->aSRegs[iSegReg].Sel;
6126}
6127
6128
6129/**
6130 * Fetches the selector value of a segment register.
6131 *
6132 * @returns The selector value.
6133 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6134 * @param iSegReg The segment register.
6135 */
6136DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6137{
6138 Assert(iSegReg < X86_SREG_COUNT);
6139 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
6140}
6141
6142
6143/**
6144 * Gets a reference (pointer) to the specified general purpose register.
6145 *
6146 * @returns Register reference.
6147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6148 * @param iReg The general purpose register.
6149 */
6150DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6151{
6152 Assert(iReg < 16);
6153 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6154 return &pCtx->aGRegs[iReg];
6155}
6156
6157
6158/**
6159 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6160 *
6161 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6162 *
6163 * @returns Register reference.
6164 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6165 * @param iReg The register.
6166 */
6167DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6168{
6169 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6170 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6171 {
6172 Assert(iReg < 16);
6173 return &pCtx->aGRegs[iReg].u8;
6174 }
6175 /* High 8-bit register: without a REX prefix, encodings 4-7 select AH/CH/DH/BH, i.e. the high byte of xAX/xCX/xDX/xBX. */
6176 Assert(iReg < 8);
6177 return &pCtx->aGRegs[iReg & 3].bHi;
6178}
6179
6180
6181/**
6182 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6183 *
6184 * @returns Register reference.
6185 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6186 * @param iReg The register.
6187 */
6188DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6189{
6190 Assert(iReg < 16);
6191 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6192 return &pCtx->aGRegs[iReg].u16;
6193}
6194
6195
6196/**
6197 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6198 *
6199 * @returns Register reference.
6200 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6201 * @param iReg The register.
6202 */
6203DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6204{
6205 Assert(iReg < 16);
6206 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6207 return &pCtx->aGRegs[iReg].u32;
6208}
6209
6210
6211/**
6212 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6213 *
6214 * @returns Register reference.
6215 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6216 * @param iReg The register.
6217 */
6218DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6219{
6220 Assert(iReg < 16);
6221 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6222 return &pCtx->aGRegs[iReg].u64;
6223}
6224
6225
6226/**
6227 * Fetches the value of an 8-bit general purpose register.
6228 *
6229 * @returns The register value.
6230 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6231 * @param iReg The register.
6232 */
6233DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6234{
6235 return *iemGRegRefU8(pVCpu, iReg);
6236}
6237
6238
6239/**
6240 * Fetches the value of a 16-bit general purpose register.
6241 *
6242 * @returns The register value.
6243 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6244 * @param iReg The register.
6245 */
6246DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6247{
6248 Assert(iReg < 16);
6249 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
6250}
6251
6252
6253/**
6254 * Fetches the value of a 32-bit general purpose register.
6255 *
6256 * @returns The register value.
6257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6258 * @param iReg The register.
6259 */
6260DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6261{
6262 Assert(iReg < 16);
6263 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
6264}
6265
6266
6267/**
6268 * Fetches the value of a 64-bit general purpose register.
6269 *
6270 * @returns The register value.
6271 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6272 * @param iReg The register.
6273 */
6274DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6275{
6276 Assert(iReg < 16);
6277 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
6278}
6279
6280
6281/**
6282 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6283 *
6284 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6285 * segment limit.
6286 *
 * @returns Strict VBox status code.
6287 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6288 * @param offNextInstr The offset of the next instruction.
6289 */
6290IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6291{
6292 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6293 switch (pVCpu->iem.s.enmEffOpSize)
6294 {
6295 case IEMMODE_16BIT:
6296 {
6297 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6298 if ( uNewIp > pCtx->cs.u32Limit
6299 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6300 return iemRaiseGeneralProtectionFault0(pVCpu);
6301 pCtx->rip = uNewIp;
6302 break;
6303 }
6304
6305 case IEMMODE_32BIT:
6306 {
6307 Assert(pCtx->rip <= UINT32_MAX);
6308 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6309
6310 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6311 if (uNewEip > pCtx->cs.u32Limit)
6312 return iemRaiseGeneralProtectionFault0(pVCpu);
6313 pCtx->rip = uNewEip;
6314 break;
6315 }
6316
6317 case IEMMODE_64BIT:
6318 {
6319 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6320
6321 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6322 if (!IEM_IS_CANONICAL(uNewRip))
6323 return iemRaiseGeneralProtectionFault0(pVCpu);
6324 pCtx->rip = uNewRip;
6325 break;
6326 }
6327
6328 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6329 }
6330
6331 pCtx->eflags.Bits.u1RF = 0;
6332
6333#ifndef IEM_WITH_CODE_TLB
6334 /* Flush the prefetch buffer. */
6335 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6336#endif
6337
6338 return VINF_SUCCESS;
6339}
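
/* Worked example (illustrative): a two byte "jmp short $" (EB FE) has
 * offNextInstr = -2 and an instruction length of 2, so the new IP equals the
 * old one and the jump spins in place.  In 16-bit and 32-bit code the result
 * is checked against CS.limit, in 64-bit code only for being canonical. */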
6340
6341
6342/**
6343 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6344 *
6345 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6346 * segment limit.
6347 *
6348 * @returns Strict VBox status code.
6349 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6350 * @param offNextInstr The offset of the next instruction.
6351 */
6352IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6353{
6354 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6355 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6356
6357 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6358 if ( uNewIp > pCtx->cs.u32Limit
6359 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6360 return iemRaiseGeneralProtectionFault0(pVCpu);
6361 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6362 pCtx->rip = uNewIp;
6363 pCtx->eflags.Bits.u1RF = 0;
6364
6365#ifndef IEM_WITH_CODE_TLB
6366 /* Flush the prefetch buffer. */
6367 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6368#endif
6369
6370 return VINF_SUCCESS;
6371}
6372
6373
6374/**
6375 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6376 *
6377 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6378 * segment limit.
6379 *
6380 * @returns Strict VBox status code.
6381 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6382 * @param offNextInstr The offset of the next instruction.
6383 */
6384IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6385{
6386 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6387 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6388
6389 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6390 {
6391 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6392
6393 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6394 if (uNewEip > pCtx->cs.u32Limit)
6395 return iemRaiseGeneralProtectionFault0(pVCpu);
6396 pCtx->rip = uNewEip;
6397 }
6398 else
6399 {
6400 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6401
6402 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6403 if (!IEM_IS_CANONICAL(uNewRip))
6404 return iemRaiseGeneralProtectionFault0(pVCpu);
6405 pCtx->rip = uNewRip;
6406 }
6407 pCtx->eflags.Bits.u1RF = 0;
6408
6409#ifndef IEM_WITH_CODE_TLB
6410 /* Flush the prefetch buffer. */
6411 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6412#endif
6413
6414 return VINF_SUCCESS;
6415}
6416
6417
6418/**
6419 * Performs a near jump to the specified address.
6420 *
6421 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6422 * segment limit.
6423 *
6424 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6425 * @param uNewRip The new RIP value.
6426 */
6427IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6428{
6429 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6430 switch (pVCpu->iem.s.enmEffOpSize)
6431 {
6432 case IEMMODE_16BIT:
6433 {
6434 Assert(uNewRip <= UINT16_MAX);
6435 if ( uNewRip > pCtx->cs.u32Limit
6436 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6437 return iemRaiseGeneralProtectionFault0(pVCpu);
6438 /** @todo Test 16-bit jump in 64-bit mode. */
6439 pCtx->rip = uNewRip;
6440 break;
6441 }
6442
6443 case IEMMODE_32BIT:
6444 {
6445 Assert(uNewRip <= UINT32_MAX);
6446 Assert(pCtx->rip <= UINT32_MAX);
6447 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6448
6449 if (uNewRip > pCtx->cs.u32Limit)
6450 return iemRaiseGeneralProtectionFault0(pVCpu);
6451 pCtx->rip = uNewRip;
6452 break;
6453 }
6454
6455 case IEMMODE_64BIT:
6456 {
6457 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6458
6459 if (!IEM_IS_CANONICAL(uNewRip))
6460 return iemRaiseGeneralProtectionFault0(pVCpu);
6461 pCtx->rip = uNewRip;
6462 break;
6463 }
6464
6465 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6466 }
6467
6468 pCtx->eflags.Bits.u1RF = 0;
6469
6470#ifndef IEM_WITH_CODE_TLB
6471 /* Flush the prefetch buffer. */
6472 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6473#endif
6474
6475 return VINF_SUCCESS;
6476}
6477
6478
6479/**
6480 * Get the address of the top of the stack.
6481 *
6482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6483 * @param pCtx The CPU context which SP/ESP/RSP should be
6484 * read.
6485 */
6486DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6487{
6488 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6489 return pCtx->rsp;
6490 if (pCtx->ss.Attr.n.u1DefBig)
6491 return pCtx->esp;
6492 return pCtx->sp;
6493}
6494
6495
6496/**
6497 * Updates the RIP/EIP/IP to point to the next instruction.
6498 *
6499 * This function leaves the EFLAGS.RF flag alone.
6500 *
6501 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6502 * @param cbInstr The number of bytes to add.
6503 */
6504IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6505{
6506 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6507 switch (pVCpu->iem.s.enmCpuMode)
6508 {
6509 case IEMMODE_16BIT:
6510 Assert(pCtx->rip <= UINT16_MAX);
6511 pCtx->eip += cbInstr;
6512 pCtx->eip &= UINT32_C(0xffff);
6513 break;
6514
6515 case IEMMODE_32BIT:
6516 pCtx->eip += cbInstr;
6517 Assert(pCtx->rip <= UINT32_MAX);
6518 break;
6519
6520 case IEMMODE_64BIT:
6521 pCtx->rip += cbInstr;
6522 break;
6523 default: AssertFailed();
6524 }
6525}
6526
6527
6528#if 0
6529/**
6530 * Updates the RIP/EIP/IP to point to the next instruction.
6531 *
6532 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6533 */
6534IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6535{
6536 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6537}
6538#endif
6539
6540
6541
6542/**
6543 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6544 *
6545 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6546 * @param cbInstr The number of bytes to add.
6547 */
6548IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6549{
6550 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6551
6552 pCtx->eflags.Bits.u1RF = 0;
6553
6554 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6555#if ARCH_BITS >= 64
6556 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
6557 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6558 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6559#else
6560 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6561 pCtx->rip += cbInstr;
6562 else
6563 {
6564 static uint32_t const s_aEipMasks[] = { UINT32_C(0xffff), UINT32_MAX };
6565 pCtx->eip = (pCtx->eip + cbInstr) & s_aEipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6566 }
6567#endif
6568}
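
/* The 64-bit host path above is just a branchless form of the usual mode
 * switch: the mask table (0xffff / 0xffffffff / all ones, indexed by
 * IEMMODE_16BIT/32BIT/64BIT) makes the incremented value wrap exactly like
 * IP, EIP or RIP would. */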
6569
6570
6571/**
6572 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6573 *
6574 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6575 */
6576IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6577{
6578 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6579}
6580
6581
6582/**
6583 * Adds to the stack pointer.
6584 *
6585 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6586 * @param pCtx The CPU context which SP/ESP/RSP should be
6587 * updated.
6588 * @param cbToAdd The number of bytes to add (8-bit!).
6589 */
6590DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6591{
6592 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6593 pCtx->rsp += cbToAdd;
6594 else if (pCtx->ss.Attr.n.u1DefBig)
6595 pCtx->esp += cbToAdd;
6596 else
6597 pCtx->sp += cbToAdd;
6598}
6599
6600
6601/**
6602 * Subtracts from the stack pointer.
6603 *
6604 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6605 * @param pCtx The CPU context which SP/ESP/RSP should be
6606 * updated.
6607 * @param cbToSub The number of bytes to subtract (8-bit!).
6608 */
6609DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6610{
6611 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6612 pCtx->rsp -= cbToSub;
6613 else if (pCtx->ss.Attr.n.u1DefBig)
6614 pCtx->esp -= cbToSub;
6615 else
6616 pCtx->sp -= cbToSub;
6617}
6618
6619
6620/**
6621 * Adds to the temporary stack pointer.
6622 *
6623 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6624 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6625 * @param cbToAdd The number of bytes to add (16-bit).
6626 * @param pCtx Where to get the current stack mode.
6627 */
6628DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6629{
6630 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6631 pTmpRsp->u += cbToAdd;
6632 else if (pCtx->ss.Attr.n.u1DefBig)
6633 pTmpRsp->DWords.dw0 += cbToAdd;
6634 else
6635 pTmpRsp->Words.w0 += cbToAdd;
6636}
6637
6638
6639/**
6640 * Subtracts from the temporary stack pointer.
6641 *
6642 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6643 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6644 * @param cbToSub The number of bytes to subtract.
6645 * @param pCtx Where to get the current stack mode.
6646 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6647 * expecting that.
6648 */
6649DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6650{
6651 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6652 pTmpRsp->u -= cbToSub;
6653 else if (pCtx->ss.Attr.n.u1DefBig)
6654 pTmpRsp->DWords.dw0 -= cbToSub;
6655 else
6656 pTmpRsp->Words.w0 -= cbToSub;
6657}
6658
6659
6660/**
6661 * Calculates the effective stack address for a push of the specified size as
6662 * well as the new RSP value (upper bits may be masked).
6663 *
6664 * @returns Effective stack address for the push.
6665 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6666 * @param pCtx Where to get the current stack mode.
6667 * @param cbItem The size of the stack item to push.
6668 * @param puNewRsp Where to return the new RSP value.
6669 */
6670DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6671{
6672 RTUINT64U uTmpRsp;
6673 RTGCPTR GCPtrTop;
6674 uTmpRsp.u = pCtx->rsp;
6675
6676 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6677 GCPtrTop = uTmpRsp.u -= cbItem;
6678 else if (pCtx->ss.Attr.n.u1DefBig)
6679 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6680 else
6681 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6682 *puNewRsp = uTmpRsp.u;
6683 return GCPtrTop;
6684}
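
/* Rough example: with a 16-bit stack (SS.D/B clear, not in 64-bit mode) and
 * SP=0x0002, pushing a 4 byte item returns GCPtrTop = 0xfffe; only the low
 * word of *puNewRsp changes, the upper bits of RSP are preserved. */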
6685
6686
6687/**
6688 * Gets the current stack pointer and calculates the value after a pop of the
6689 * specified size.
6690 *
6691 * @returns Current stack pointer.
6692 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6693 * @param pCtx Where to get the current stack mode.
6694 * @param cbItem The size of the stack item to pop.
6695 * @param puNewRsp Where to return the new RSP value.
6696 */
6697DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6698{
6699 RTUINT64U uTmpRsp;
6700 RTGCPTR GCPtrTop;
6701 uTmpRsp.u = pCtx->rsp;
6702
6703 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6704 {
6705 GCPtrTop = uTmpRsp.u;
6706 uTmpRsp.u += cbItem;
6707 }
6708 else if (pCtx->ss.Attr.n.u1DefBig)
6709 {
6710 GCPtrTop = uTmpRsp.DWords.dw0;
6711 uTmpRsp.DWords.dw0 += cbItem;
6712 }
6713 else
6714 {
6715 GCPtrTop = uTmpRsp.Words.w0;
6716 uTmpRsp.Words.w0 += cbItem;
6717 }
6718 *puNewRsp = uTmpRsp.u;
6719 return GCPtrTop;
6720}
6721
6722
6723/**
6724 * Calculates the effective stack address for a push of the specified size as
6725 * well as the new temporary RSP value (upper bits may be masked).
6726 *
6727 * @returns Effective stack address for the push.
6728 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6729 * @param pCtx Where to get the current stack mode.
6730 * @param pTmpRsp The temporary stack pointer. This is updated.
6731 * @param cbItem The size of the stack item to push.
6732 */
6733DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6734{
6735 RTGCPTR GCPtrTop;
6736
6737 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6738 GCPtrTop = pTmpRsp->u -= cbItem;
6739 else if (pCtx->ss.Attr.n.u1DefBig)
6740 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6741 else
6742 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6743 return GCPtrTop;
6744}
6745
6746
6747/**
6748 * Gets the effective stack address for a pop of the specified size and
6749 * calculates and updates the temporary RSP.
6750 *
6751 * @returns Current stack pointer.
6752 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6753 * @param pCtx Where to get the current stack mode.
6754 * @param pTmpRsp The temporary stack pointer. This is updated.
6755 * @param cbItem The size of the stack item to pop.
6756 */
6757DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6758{
6759 RTGCPTR GCPtrTop;
6760 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6761 {
6762 GCPtrTop = pTmpRsp->u;
6763 pTmpRsp->u += cbItem;
6764 }
6765 else if (pCtx->ss.Attr.n.u1DefBig)
6766 {
6767 GCPtrTop = pTmpRsp->DWords.dw0;
6768 pTmpRsp->DWords.dw0 += cbItem;
6769 }
6770 else
6771 {
6772 GCPtrTop = pTmpRsp->Words.w0;
6773 pTmpRsp->Words.w0 += cbItem;
6774 }
6775 return GCPtrTop;
6776}
6777
6778/** @} */
6779
6780
6781/** @name FPU access and helpers.
6782 *
6783 * @{
6784 */
6785
6786
6787/**
6788 * Hook for preparing to use the host FPU.
6789 *
6790 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6791 *
6792 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6793 */
6794DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6795{
6796#ifdef IN_RING3
6797 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6798#else
6799 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6800#endif
6801}
6802
6803
6804/**
6805 * Hook for preparing to use the host FPU for SSE.
6806 *
6807 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6808 *
6809 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6810 */
6811DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6812{
6813 iemFpuPrepareUsage(pVCpu);
6814}
6815
6816
6817/**
6818 * Hook for preparing to use the host FPU for AVX.
6819 *
6820 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6821 *
6822 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6823 */
6824DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6825{
6826 iemFpuPrepareUsage(pVCpu);
6827}
6828
6829
6830/**
6831 * Hook for actualizing the guest FPU state before the interpreter reads it.
6832 *
6833 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6834 *
6835 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6836 */
6837DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6838{
6839#ifdef IN_RING3
6840 NOREF(pVCpu);
6841#else
6842 CPUMRZFpuStateActualizeForRead(pVCpu);
6843#endif
6844}
6845
6846
6847/**
6848 * Hook for actualizing the guest FPU state before the interpreter changes it.
6849 *
6850 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6851 *
6852 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6853 */
6854DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6855{
6856#ifdef IN_RING3
6857 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6858#else
6859 CPUMRZFpuStateActualizeForChange(pVCpu);
6860#endif
6861}
6862
6863
6864/**
6865 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6866 * only.
6867 *
6868 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6869 *
6870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6871 */
6872DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6873{
6874#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6875 NOREF(pVCpu);
6876#else
6877 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6878#endif
6879}
6880
6881
6882/**
6883 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6884 * read+write.
6885 *
6886 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6887 *
6888 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6889 */
6890DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6891{
6892#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6893 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6894#else
6895 CPUMRZFpuStateActualizeForChange(pVCpu);
6896#endif
6897}
6898
6899
6900/**
6901 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
6902 * only.
6903 *
6904 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6905 *
6906 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6907 */
6908DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
6909{
6910#ifdef IN_RING3
6911 NOREF(pVCpu);
6912#else
6913 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
6914#endif
6915}
6916
6917
6918/**
6919 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
6920 * read+write.
6921 *
6922 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6923 *
6924 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6925 */
6926DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
6927{
6928#ifdef IN_RING3
6929 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6930#else
6931 CPUMRZFpuStateActualizeForChange(pVCpu);
6932#endif
6933}
6934
6935
6936/**
6937 * Stores a QNaN value into a FPU register.
6938 *
6939 * @param pReg Pointer to the register.
6940 */
6941DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6942{
6943 pReg->au32[0] = UINT32_C(0x00000000);
6944 pReg->au32[1] = UINT32_C(0xc0000000);
6945 pReg->au16[4] = UINT16_C(0xffff);
6946}
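
/* The pattern written above should be the x87 "real indefinite" QNaN:
 * sign=1, exponent=0x7fff, mantissa=0xc000000000000000 - the value the FPU
 * itself produces for masked invalid operations. */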
6947
6948
6949/**
6950 * Updates the FOP, FPU.CS and FPUIP registers.
6951 *
6952 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6953 * @param pCtx The CPU context.
6954 * @param pFpuCtx The FPU context.
6955 */
6956DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
6957{
6958 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
6959 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
6960 /** @todo x87.CS and FPUIP need to be kept separately. */
6961 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6962 {
6963 /** @todo Testcase: we are making assumptions here about how FPUIP and FPUDP
6964 * are formed in real mode, based on the fnsave and fnstenv images. */
6965 pFpuCtx->CS = 0;
6966 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
6967 }
6968 else
6969 {
6970 pFpuCtx->CS = pCtx->cs.Sel;
6971 pFpuCtx->FPUIP = pCtx->rip;
6972 }
6973}
6974
6975
6976/**
6977 * Updates the x87.DS and FPUDP registers.
6978 *
6979 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6980 * @param pCtx The CPU context.
6981 * @param pFpuCtx The FPU context.
6982 * @param iEffSeg The effective segment register.
6983 * @param GCPtrEff The effective address relative to @a iEffSeg.
6984 */
6985DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6986{
6987 RTSEL sel;
6988 switch (iEffSeg)
6989 {
6990 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
6991 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
6992 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
6993 case X86_SREG_ES: sel = pCtx->es.Sel; break;
6994 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
6995 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
6996 default:
6997 AssertMsgFailed(("%d\n", iEffSeg));
6998 sel = pCtx->ds.Sel;
6999 }
7000 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7001 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7002 {
7003 pFpuCtx->DS = 0;
7004 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7005 }
7006 else
7007 {
7008 pFpuCtx->DS = sel;
7009 pFpuCtx->FPUDP = GCPtrEff;
7010 }
7011}
7012
7013
7014/**
7015 * Rotates the stack registers in the push direction.
7016 *
7017 * @param pFpuCtx The FPU context.
7018 * @remarks This is a complete waste of time, but fxsave stores the registers in
7019 * stack order.
7020 */
7021DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7022{
7023 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7024 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7025 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7026 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7027 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7028 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7029 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7030 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7031 pFpuCtx->aRegs[0].r80 = r80Tmp;
7032}
7033
7034
7035/**
7036 * Rotates the stack registers in the pop direction.
7037 *
7038 * @param pFpuCtx The FPU context.
7039 * @remarks This is a complete waste of time, but fxsave stores the registers in
7040 * stack order.
7041 */
7042DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7043{
7044 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7045 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7046 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7047 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7048 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7049 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7050 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7051 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7052 pFpuCtx->aRegs[7].r80 = r80Tmp;
7053}
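
/* Why the rotation: aRegs[] is kept in the same TOP-relative order as the
 * fxsave image, so aRegs[0] is always ST(0).  Whenever TOP moves because of
 * a push or pop, the array contents must be rotated to keep that invariant. */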
7054
7055
7056/**
7057 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7058 * exception prevents it.
7059 *
7060 * @param pResult The FPU operation result to push.
7061 * @param pFpuCtx The FPU context.
7062 */
7063IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7064{
7065 /* Update FSW and bail if there are pending exceptions afterwards. */
7066 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7067 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7068 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7069 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7070 {
7071 pFpuCtx->FSW = fFsw;
7072 return;
7073 }
7074
7075 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7076 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7077 {
7078 /* All is fine, push the actual value. */
7079 pFpuCtx->FTW |= RT_BIT(iNewTop);
7080 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7081 }
7082 else if (pFpuCtx->FCW & X86_FCW_IM)
7083 {
7084 /* Masked stack overflow, push QNaN. */
7085 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7086 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7087 }
7088 else
7089 {
7090 /* Raise stack overflow, don't push anything. */
7091 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7092 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7093 return;
7094 }
7095
7096 fFsw &= ~X86_FSW_TOP_MASK;
7097 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7098 pFpuCtx->FSW = fFsw;
7099
7100 iemFpuRotateStackPush(pFpuCtx);
7101}
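
/* In short: (TOP + 7) & 7 is TOP - 1 modulo 8, i.e. the register the push
 * will land in.  A set FTW bit means "occupied", so finding it set here is a
 * stack overflow: if masked, a QNaN is pushed with C1=1; if unmasked, ES and
 * B are set and the stack is left untouched. */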
7102
7103
7104/**
7105 * Stores a result in a FPU register and updates the FSW and FTW.
7106 *
7107 * @param pFpuCtx The FPU context.
7108 * @param pResult The result to store.
7109 * @param iStReg Which FPU register to store it in.
7110 */
7111IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7112{
7113 Assert(iStReg < 8);
7114 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7115 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7116 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7117 pFpuCtx->FTW |= RT_BIT(iReg);
7118 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7119}
7120
7121
7122/**
7123 * Only updates the FPU status word (FSW) with the result of the current
7124 * instruction.
7125 *
7126 * @param pFpuCtx The FPU context.
7127 * @param u16FSW The FSW output of the current instruction.
7128 */
7129IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7130{
7131 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7132 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7133}
7134
7135
7136/**
7137 * Pops one item off the FPU stack if no pending exception prevents it.
7138 *
7139 * @param pFpuCtx The FPU context.
7140 */
7141IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7142{
7143 /* Check pending exceptions. */
7144 uint16_t uFSW = pFpuCtx->FSW;
7145 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7146 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7147 return;
7148
7149 /* TOP++ (adding 9 is the same as adding 1 modulo 8). */
7150 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7151 uFSW &= ~X86_FSW_TOP_MASK;
7152 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7153 pFpuCtx->FSW = uFSW;
7154
7155 /* Mark the previous ST0 as empty. */
7156 iOldTop >>= X86_FSW_TOP_SHIFT;
7157 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7158
7159 /* Rotate the registers. */
7160 iemFpuRotateStackPop(pFpuCtx);
7161}
7162
7163
7164/**
7165 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7166 *
7167 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7168 * @param pResult The FPU operation result to push.
7169 */
7170IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7171{
7172 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7173 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7174 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7175 iemFpuMaybePushResult(pResult, pFpuCtx);
7176}
7177
7178
7179/**
7180 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7181 * and sets FPUDP and FPUDS.
7182 *
7183 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7184 * @param pResult The FPU operation result to push.
7185 * @param iEffSeg The effective segment register.
7186 * @param GCPtrEff The effective address relative to @a iEffSeg.
7187 */
7188IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7189{
7190 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7191 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7192 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7193 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7194 iemFpuMaybePushResult(pResult, pFpuCtx);
7195}
7196
7197
7198/**
7199 * Replace ST0 with the first value and push the second onto the FPU stack,
7200 * unless a pending exception prevents it.
7201 *
7202 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7203 * @param pResult The FPU operation result to store and push.
7204 */
7205IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7206{
7207 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7208 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7209 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7210
7211 /* Update FSW and bail if there are pending exceptions afterwards. */
7212 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7213 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7214 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7215 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7216 {
7217 pFpuCtx->FSW = fFsw;
7218 return;
7219 }
7220
7221 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7222 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7223 {
7224 /* All is fine, push the actual value. */
7225 pFpuCtx->FTW |= RT_BIT(iNewTop);
7226 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7227 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7228 }
7229 else if (pFpuCtx->FCW & X86_FCW_IM)
7230 {
7231 /* Masked stack overflow, push QNaN. */
7232 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7233 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7234 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7235 }
7236 else
7237 {
7238 /* Raise stack overflow, don't push anything. */
7239 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7240 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7241 return;
7242 }
7243
7244 fFsw &= ~X86_FSW_TOP_MASK;
7245 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7246 pFpuCtx->FSW = fFsw;
7247
7248 iemFpuRotateStackPush(pFpuCtx);
7249}
7250
7251
7252/**
7253 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7254 * FOP.
7255 *
7256 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7257 * @param pResult The result to store.
7258 * @param iStReg Which FPU register to store it in.
7259 */
7260IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7261{
7262 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7263 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7264 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7265 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7266}
7267
7268
7269/**
7270 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7271 * FOP, and then pops the stack.
7272 *
7273 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7274 * @param pResult The result to store.
7275 * @param iStReg Which FPU register to store it in.
7276 */
7277IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7278{
7279 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7280 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7281 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7282 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7283 iemFpuMaybePopOne(pFpuCtx);
7284}
7285
7286
7287/**
7288 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7289 * FPUDP, and FPUDS.
7290 *
7291 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7292 * @param pResult The result to store.
7293 * @param iStReg Which FPU register to store it in.
7294 * @param iEffSeg The effective memory operand selector register.
7295 * @param GCPtrEff The effective memory operand offset.
7296 */
7297IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7298 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7299{
7300 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7301 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7302 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7303 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7304 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7305}
7306
7307
7308/**
7309 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7310 * FPUDP, and FPUDS, and then pops the stack.
7311 *
7312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7313 * @param pResult The result to store.
7314 * @param iStReg Which FPU register to store it in.
7315 * @param iEffSeg The effective memory operand selector register.
7316 * @param GCPtrEff The effective memory operand offset.
7317 */
7318IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7319 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7320{
7321 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7322 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7323 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7324 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7325 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7326 iemFpuMaybePopOne(pFpuCtx);
7327}
7328
7329
7330/**
7331 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7332 *
7333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7334 */
7335IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7336{
7337 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7338 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7339 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7340}
7341
7342
7343/**
7344 * Marks the specified stack register as free (for FFREE).
7345 *
7346 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7347 * @param iStReg The register to free.
7348 */
7349IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7350{
7351 Assert(iStReg < 8);
7352 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7353 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7354 pFpuCtx->FTW &= ~RT_BIT(iReg);
7355}
7356
7357
7358/**
7359 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7360 *
7361 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7362 */
7363IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7364{
7365 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7366 uint16_t uFsw = pFpuCtx->FSW;
7367 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7368 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7369 uFsw &= ~X86_FSW_TOP_MASK;
7370 uFsw |= uTop;
7371 pFpuCtx->FSW = uFsw;
7372}
7373
7374
7375/**
7376 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7377 *
7378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7379 */
7380IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7381{
7382 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7383 uint16_t uFsw = pFpuCtx->FSW;
7384 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7385 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7386 uFsw &= ~X86_FSW_TOP_MASK;
7387 uFsw |= uTop;
7388 pFpuCtx->FSW = uFsw;
7389}
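
/* Both helpers above operate on the 3-bit TOP field in FSW bits 11 thru 13:
 * adding 1 << X86_FSW_TOP_SHIFT increments it, while adding 7 << shift
 * decrements it, since +7 is congruent to -1 modulo 8 after masking. */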
7390
7391
7392/**
7393 * Updates the FSW, FOP, FPUIP, and FPUCS.
7394 *
7395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7396 * @param u16FSW The FSW from the current instruction.
7397 */
7398IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7399{
7400 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7401 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7402 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7403 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7404}
7405
7406
7407/**
7408 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7409 *
7410 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7411 * @param u16FSW The FSW from the current instruction.
7412 */
7413IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7414{
7415 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7416 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7417 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7418 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7419 iemFpuMaybePopOne(pFpuCtx);
7420}
7421
7422
7423/**
7424 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7425 *
7426 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7427 * @param u16FSW The FSW from the current instruction.
7428 * @param iEffSeg The effective memory operand selector register.
7429 * @param GCPtrEff The effective memory operand offset.
7430 */
7431IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7432{
7433 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7434 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7435 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7436 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7437 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7438}
7439
7440
7441/**
7442 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7443 *
7444 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7445 * @param u16FSW The FSW from the current instruction.
7446 */
7447IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7448{
7449 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7450 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7451 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7452 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7453 iemFpuMaybePopOne(pFpuCtx);
7454 iemFpuMaybePopOne(pFpuCtx);
7455}
7456
7457
7458/**
7459 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7460 *
7461 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7462 * @param u16FSW The FSW from the current instruction.
7463 * @param iEffSeg The effective memory operand selector register.
7464 * @param GCPtrEff The effective memory operand offset.
7465 */
7466IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7467{
7468 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7469 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7470 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7471 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7472 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7473 iemFpuMaybePopOne(pFpuCtx);
7474}
7475
7476
7477/**
7478 * Worker routine for raising an FPU stack underflow exception.
7479 *
7480 * @param pFpuCtx The FPU context.
7481 * @param iStReg The stack register being accessed.
7482 */
7483IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7484{
7485 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7486 if (pFpuCtx->FCW & X86_FCW_IM)
7487 {
7488 /* Masked underflow. */
7489 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7490 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7491 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7492 if (iStReg != UINT8_MAX)
7493 {
7494 pFpuCtx->FTW |= RT_BIT(iReg);
7495 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7496 }
7497 }
7498 else
7499 {
7500 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7501 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7502 }
7503}
7504
7505
7506/**
7507 * Raises a FPU stack underflow exception.
7508 *
7509 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7510 * @param iStReg The destination register that should be loaded
7511 * with QNaN if \#IS is masked. Specify
7512 * UINT8_MAX if none (like for fcom).
7513 */
7514DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7515{
7516 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7517 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7518 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7519 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7520}
7521
7522
7523DECL_NO_INLINE(IEM_STATIC, void)
7524iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7525{
7526 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7527 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7528 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7529 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7530 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7531}
7532
7533
7534DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7535{
7536 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7537 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7538 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7539 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7540 iemFpuMaybePopOne(pFpuCtx);
7541}
7542
7543
7544DECL_NO_INLINE(IEM_STATIC, void)
7545iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7546{
7547 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7548 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7549 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7550 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7551 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7552 iemFpuMaybePopOne(pFpuCtx);
7553}
7554
7555
7556DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7557{
7558 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7559 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7560 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7561 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7562 iemFpuMaybePopOne(pFpuCtx);
7563 iemFpuMaybePopOne(pFpuCtx);
7564}
7565
7566
7567DECL_NO_INLINE(IEM_STATIC, void)
7568iemFpuStackPushUnderflow(PVMCPU pVCpu)
7569{
7570 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7571 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7572 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7573
7574 if (pFpuCtx->FCW & X86_FCW_IM)
7575 {
7576 /* Masked underflow - Push QNaN. */
7577 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7578 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7579 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7580 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7581 pFpuCtx->FTW |= RT_BIT(iNewTop);
7582 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7583 iemFpuRotateStackPush(pFpuCtx);
7584 }
7585 else
7586 {
7587 /* Exception pending - don't change TOP or the register stack. */
7588 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7589 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7590 }
7591}
7592
7593
7594DECL_NO_INLINE(IEM_STATIC, void)
7595iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7596{
7597 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7598 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7599 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7600
7601 if (pFpuCtx->FCW & X86_FCW_IM)
7602 {
7603 /* Masked underflow - Push QNaN. */
7604 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7605 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7606 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7607 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7608 pFpuCtx->FTW |= RT_BIT(iNewTop);
7609 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7610 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7611 iemFpuRotateStackPush(pFpuCtx);
7612 }
7613 else
7614 {
7615 /* Exception pending - don't change TOP or the register stack. */
7616 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7617 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7618 }
7619}
7620
7621
7622/**
7623 * Worker routine for raising an FPU stack overflow exception on a push.
7624 *
7625 * @param pFpuCtx The FPU context.
7626 */
7627IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7628{
7629 if (pFpuCtx->FCW & X86_FCW_IM)
7630 {
7631 /* Masked overflow. */
7632 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7633 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7634 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7635 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7636 pFpuCtx->FTW |= RT_BIT(iNewTop);
7637 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7638 iemFpuRotateStackPush(pFpuCtx);
7639 }
7640 else
7641 {
7642 /* Exception pending - don't change TOP or the register stack. */
7643 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7644 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7645 }
7646}
7647
7648
7649/**
7650 * Raises a FPU stack overflow exception on a push.
7651 *
7652 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7653 */
7654DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7655{
7656 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7657 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7658 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7659 iemFpuStackPushOverflowOnly(pFpuCtx);
7660}
7661
7662
7663/**
7664 * Raises a FPU stack overflow exception on a push with a memory operand.
7665 *
7666 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7667 * @param iEffSeg The effective memory operand selector register.
7668 * @param GCPtrEff The effective memory operand offset.
7669 */
7670DECL_NO_INLINE(IEM_STATIC, void)
7671iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7672{
7673 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7674 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7675 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7676 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7677 iemFpuStackPushOverflowOnly(pFpuCtx);
7678}
7679
7680
7681IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7682{
7683 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7684 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7685 if (pFpuCtx->FTW & RT_BIT(iReg))
7686 return VINF_SUCCESS;
7687 return VERR_NOT_FOUND;
7688}
7689
7690
7691IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7692{
7693 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7694 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7695 if (pFpuCtx->FTW & RT_BIT(iReg))
7696 {
7697 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7698 return VINF_SUCCESS;
7699 }
7700 return VERR_NOT_FOUND;
7701}
7702
7703
7704IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7705 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7706{
7707 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7708 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7709 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7710 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7711 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7712 {
7713 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7714 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7715 return VINF_SUCCESS;
7716 }
7717 return VERR_NOT_FOUND;
7718}
7719
7720
7721IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7722{
7723 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7724 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7725 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7726 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7727 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7728 {
7729 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7730 return VINF_SUCCESS;
7731 }
7732 return VERR_NOT_FOUND;
7733}
7734
7735
7736/**
7737 * Updates the FPU exception status after FCW is changed.
7738 *
7739 * @param pFpuCtx The FPU context.
7740 */
7741IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7742{
7743 uint16_t u16Fsw = pFpuCtx->FSW;
7744 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7745 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7746 else
7747 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7748 pFpuCtx->FSW = u16Fsw;
7749}
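
/* Rule of thumb: ES is the exception summary bit, set whenever at least one
 * exception flag in FSW is unmasked in FCW; B should simply mirror ES on
 * anything newer than the original 8087. */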
7750
7751
7752/**
7753 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7754 *
7755 * @returns The full FTW.
7756 * @param pFpuCtx The FPU context.
7757 */
7758IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7759{
7760 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7761 uint16_t u16Ftw = 0;
7762 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7763 for (unsigned iSt = 0; iSt < 8; iSt++)
7764 {
7765 unsigned const iReg = (iSt + iTop) & 7;
7766 if (!(u8Ftw & RT_BIT(iReg)))
7767 u16Ftw |= 3 << (iReg * 2); /* empty */
7768 else
7769 {
7770 uint16_t uTag;
7771 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7772 if (pr80Reg->s.uExponent == 0x7fff)
7773 uTag = 2; /* Exponent is all 1's => Special. */
7774 else if (pr80Reg->s.uExponent == 0x0000)
7775 {
7776 if (pr80Reg->s.u64Mantissa == 0x0000)
7777 uTag = 1; /* All bits are zero => Zero. */
7778 else
7779 uTag = 2; /* Must be special. */
7780 }
7781 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7782 uTag = 0; /* Valid. */
7783 else
7784 uTag = 2; /* Must be special. */
7785
7786 u16Ftw |= uTag << (iReg * 2);
7787 }
7788 }
7789
7790 return u16Ftw;
7791}
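
/* The 2-bit tag values produced above follow the fnstenv/fnsave encoding:
 *      00 = valid       01 = zero
 *      10 = special (NaN, infinity, denormal, unnormal)
 *      11 = empty
 */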
7792
7793
7794/**
7795 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7796 *
7797 * @returns The compressed FTW.
7798 * @param u16FullFtw The full FTW to convert.
7799 */
7800IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7801{
7802 uint8_t u8Ftw = 0;
7803 for (unsigned i = 0; i < 8; i++)
7804 {
7805 if ((u16FullFtw & 3) != 3 /*empty*/)
7806 u8Ftw |= RT_BIT(i);
7807 u16FullFtw >>= 2;
7808 }
7809
7810 return u8Ftw;
7811}
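
/* Example (illustrative): a full FTW of 0xfffc tags register 0 as valid and
 * registers 1 thru 7 as empty, so the compressed form is 0x01 - one bit per
 * register, set whenever the 2-bit tag is anything other than 11b (empty). */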
7812
7813/** @} */
7814
7815
7816/** @name Memory access.
7817 *
7818 * @{
7819 */
7820
7821
7822/**
7823 * Updates the IEMCPU::cbWritten counter if applicable.
7824 *
7825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7826 * @param fAccess The access being accounted for.
7827 * @param cbMem The access size.
7828 */
7829DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7830{
7831 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7832 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7833 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7834}
7835
7836
7837/**
7838 * Checks if the given segment can be written to, raising the appropriate
7839 * exception if not.
7840 *
7841 * @returns VBox strict status code.
7842 *
7843 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7844 * @param pHid Pointer to the hidden register.
7845 * @param iSegReg The register number.
7846 * @param pu64BaseAddr Where to return the base address to use for the
7847 * segment. (In 64-bit code it may differ from the
7848 * base in the hidden segment.)
7849 */
7850IEM_STATIC VBOXSTRICTRC
7851iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7852{
7853 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7854 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7855 else
7856 {
7857 if (!pHid->Attr.n.u1Present)
7858 {
7859 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7860 AssertRelease(uSel == 0);
7861 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7862 return iemRaiseGeneralProtectionFault0(pVCpu);
7863 }
7864
7865 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7866 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7867 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7868 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7869 *pu64BaseAddr = pHid->u64Base;
7870 }
7871 return VINF_SUCCESS;
7872}
7873
7874
7875/**
7876 * Checks if the given segment can be read from, raising the appropriate
7877 * exception if not.
7878 *
7879 * @returns VBox strict status code.
7880 *
7881 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7882 * @param pHid Pointer to the hidden register.
7883 * @param iSegReg The register number.
7884 * @param pu64BaseAddr Where to return the base address to use for the
7885 * segment. (In 64-bit code it may differ from the
7886 * base in the hidden segment.)
7887 */
7888IEM_STATIC VBOXSTRICTRC
7889iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7890{
7891 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7892 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7893 else
7894 {
7895 if (!pHid->Attr.n.u1Present)
7896 {
7897 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7898 AssertRelease(uSel == 0);
7899 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7900 return iemRaiseGeneralProtectionFault0(pVCpu);
7901 }
7902
7903 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7904 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7905 *pu64BaseAddr = pHid->u64Base;
7906 }
7907 return VINF_SUCCESS;
7908}
7909
7910
7911/**
7912 * Applies the segment limit, base and attributes.
7913 *
7914 * This may raise a \#GP or \#SS.
7915 *
7916 * @returns VBox strict status code.
7917 *
7918 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7919 * @param fAccess The kind of access which is being performed.
7920 * @param iSegReg The index of the segment register to apply.
7921 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7922 * TSS, ++).
7923 * @param cbMem The access size.
7924 * @param pGCPtrMem Pointer to the guest memory address to apply
7925 * segmentation to. Input and output parameter.
7926 */
7927IEM_STATIC VBOXSTRICTRC
7928iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7929{
7930 if (iSegReg == UINT8_MAX)
7931 return VINF_SUCCESS;
7932
7933 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7934 switch (pVCpu->iem.s.enmCpuMode)
7935 {
7936 case IEMMODE_16BIT:
7937 case IEMMODE_32BIT:
7938 {
7939 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7940 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7941
7942 if ( pSel->Attr.n.u1Present
7943 && !pSel->Attr.n.u1Unusable)
7944 {
7945 Assert(pSel->Attr.n.u1DescType);
7946 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7947 {
7948 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7949 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7950 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7951
7952 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7953 {
7954 /** @todo CPL check. */
7955 }
7956
7957 /*
7958 * There are two kinds of data selectors, normal and expand down.
7959 */
7960 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
7961 {
7962 if ( GCPtrFirst32 > pSel->u32Limit
7963 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7964 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7965 }
7966 else
7967 {
7968 /*
7969 * The upper boundary is defined by the B bit, not the G bit!
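 * (I.e. valid offsets are limit+1 up to 0xffffffff when B=1, or limit+1 up to 0xffff when B=0.)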
7970 */
7971 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
7972 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
7973 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7974 }
7975 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7976 }
7977 else
7978 {
7980 /*
7981 * Code selectors can usually be used to read thru; writing is
7982 * only permitted in real and V8086 mode.
7983 */
7984 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7985 || ( (fAccess & IEM_ACCESS_TYPE_READ)
7986 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
7987 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
7988 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7989
7990 if ( GCPtrFirst32 > pSel->u32Limit
7991 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7992 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7993
7994 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7995 {
7996 /** @todo CPL check. */
7997 }
7998
7999 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8000 }
8001 }
8002 else
8003 return iemRaiseGeneralProtectionFault0(pVCpu);
8004 return VINF_SUCCESS;
8005 }
8006
8007 case IEMMODE_64BIT:
8008 {
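 /* In 64-bit mode only the FS and GS bases are applied; the other segments are flat. */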
8009 RTGCPTR GCPtrMem = *pGCPtrMem;
8010 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8011 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8012
8013 Assert(cbMem >= 1);
8014 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8015 return VINF_SUCCESS;
8016 return iemRaiseGeneralProtectionFault0(pVCpu);
8017 }
8018
8019 default:
8020 AssertFailedReturn(VERR_IEM_IPE_7);
8021 }
8022}
8023
8024
8025/**
8026 * Translates a virtual address to a physical address and checks if we
8027 * can access the page as specified.
8028 *
8029 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8030 * @param GCPtrMem The virtual address.
8031 * @param fAccess The intended access.
8032 * @param pGCPhysMem Where to return the physical address.
8033 */
8034IEM_STATIC VBOXSTRICTRC
8035iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8036{
8037 /** @todo Need a different PGM interface here. We're currently using
8038 * generic / REM interfaces. This won't cut it for R0 & RC. */
8039 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8040 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8041 RTGCPHYS GCPhys;
8042 uint64_t fFlags;
8043 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8044 if (RT_FAILURE(rc))
8045 {
8046 /** @todo Check unassigned memory in unpaged mode. */
8047 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8048 *pGCPhysMem = NIL_RTGCPHYS;
8049 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8050 }
8051
8052 /* If the page is writable and does not have the no-exec bit set, all
8053 access is allowed. Otherwise we'll have to check more carefully... */
8054 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8055 {
8056 /* Write to read only memory? */
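 /* Plain CPL-3 writes always fault here; supervisor and system accesses only fault when CR0.WP is set. */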
8057 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8058 && !(fFlags & X86_PTE_RW)
8059 && ( (pVCpu->iem.s.uCpl == 3
8060 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8061 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
8062 {
8063 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8064 *pGCPhysMem = NIL_RTGCPHYS;
8065 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8066 }
8067
8068 /* Kernel memory accessed by userland? */
8069 if ( !(fFlags & X86_PTE_US)
8070 && pVCpu->iem.s.uCpl == 3
8071 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8072 {
8073 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8074 *pGCPhysMem = NIL_RTGCPHYS;
8075 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8076 }
8077
8078 /* Executing non-executable memory? */
8079 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8080 && (fFlags & X86_PTE_PAE_NX)
8081 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
8082 {
8083 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8084 *pGCPhysMem = NIL_RTGCPHYS;
8085 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8086 VERR_ACCESS_DENIED);
8087 }
8088 }
8089
8090 /*
8091 * Set the dirty / access flags.
8092 * ASSUMES this is set when the address is translated rather than on commit...
8093 */
8094 /** @todo testcase: check when A and D bits are actually set by the CPU. */
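 /* Writes need both the accessed and dirty bits set; reads and fetches only the accessed bit. */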
8095 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8096 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8097 {
8098 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8099 AssertRC(rc2);
8100 }
8101
8102 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8103 *pGCPhysMem = GCPhys;
8104 return VINF_SUCCESS;
8105}
8106
8107
8108
8109/**
8110 * Maps a physical page.
8111 *
8112 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8113 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8114 * @param GCPhysMem The physical address.
8115 * @param fAccess The intended access.
8116 * @param ppvMem Where to return the mapping address.
8117 * @param pLock The PGM lock.
8118 */
8119IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8120{
8121#ifdef IEM_VERIFICATION_MODE_FULL
8122 /* Force the alternative path so we can ignore writes. */
8123 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
8124 {
8125 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8126 {
8127 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
8128 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8129 if (RT_FAILURE(rc2))
8130 pVCpu->iem.s.fProblematicMemory = true;
8131 }
8132 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8133 }
8134#endif
8135#ifdef IEM_LOG_MEMORY_WRITES
8136 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8137 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8138#endif
8139#ifdef IEM_VERIFICATION_MODE_MINIMAL
8140 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8141#endif
8142
8143 /** @todo This API may require some improving later. A private deal with PGM
8144 * regarding locking and unlocking needs to be struck. A couple of TLBs
8145 * living in PGM, but with publicly accessible inlined access methods
8146 * could perhaps be an even better solution. */
8147 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8148 GCPhysMem,
8149 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8150 pVCpu->iem.s.fBypassHandlers,
8151 ppvMem,
8152 pLock);
8153 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8154 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8155
8156#ifdef IEM_VERIFICATION_MODE_FULL
8157 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8158 pVCpu->iem.s.fProblematicMemory = true;
8159#endif
8160 return rc;
8161}
8162
8163
8164/**
8165 * Unmap a page previously mapped by iemMemPageMap.
8166 *
8167 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8168 * @param GCPhysMem The physical address.
8169 * @param fAccess The intended access.
8170 * @param pvMem What iemMemPageMap returned.
8171 * @param pLock The PGM lock.
8172 */
8173DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8174{
8175 NOREF(pVCpu);
8176 NOREF(GCPhysMem);
8177 NOREF(fAccess);
8178 NOREF(pvMem);
8179 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8180}
8181
8182
8183/**
8184 * Looks up a memory mapping entry.
8185 *
8186 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8187 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8188 * @param pvMem The memory address.
8189 * @param fAccess The access type and what-flags to match.
8190 */
8191DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8192{
8193 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8194 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
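 /* A mapping matches when both the buffer pointer and the masked access flags agree. */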
8195 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8196 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8197 return 0;
8198 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8199 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8200 return 1;
8201 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8202 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8203 return 2;
8204 return VERR_NOT_FOUND;
8205}
8206
8207
8208/**
8209 * Finds a free memmap entry when iNextMapping doesn't point at a free one.
8210 *
8211 * @returns Memory mapping index, 1024 on failure.
8212 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8213 */
8214IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8215{
8216 /*
8217 * The easy case.
8218 */
8219 if (pVCpu->iem.s.cActiveMappings == 0)
8220 {
8221 pVCpu->iem.s.iNextMapping = 1;
8222 return 0;
8223 }
8224
8225 /* There should be enough mappings for all instructions. */
8226 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8227
8228 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8229 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8230 return i;
8231
8232 AssertFailedReturn(1024);
8233}
8234
8235
8236/**
8237 * Commits a bounce buffer that needs writing back and unmaps it.
8238 *
8239 * @returns Strict VBox status code.
8240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8241 * @param iMemMap The index of the buffer to commit.
8242 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
8243 * Always false in ring-3, obviously.
8244 */
8245IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8246{
8247 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8248 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8249#ifdef IN_RING3
8250 Assert(!fPostponeFail);
8251 RT_NOREF_PV(fPostponeFail);
8252#endif
8253
8254 /*
8255 * Do the writing.
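 * The bounce buffer may be backed by one or two physical ranges (GCPhysFirst and
 * GCPhysSecond), so there can be up to two writes whose status codes are merged.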
8256 */
8257#ifndef IEM_VERIFICATION_MODE_MINIMAL
8258 PVM pVM = pVCpu->CTX_SUFF(pVM);
8259 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
8260 && !IEM_VERIFICATION_ENABLED(pVCpu))
8261 {
8262 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8263 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8264 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8265 if (!pVCpu->iem.s.fBypassHandlers)
8266 {
8267 /*
8268 * Carefully and efficiently dealing with access handler return
8269 * codes makes this a little bloated.
8270 */
8271 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8272 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8273 pbBuf,
8274 cbFirst,
8275 PGMACCESSORIGIN_IEM);
8276 if (rcStrict == VINF_SUCCESS)
8277 {
8278 if (cbSecond)
8279 {
8280 rcStrict = PGMPhysWrite(pVM,
8281 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8282 pbBuf + cbFirst,
8283 cbSecond,
8284 PGMACCESSORIGIN_IEM);
8285 if (rcStrict == VINF_SUCCESS)
8286 { /* nothing */ }
8287 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8288 {
8289 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8290 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8291 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8292 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8293 }
8294# ifndef IN_RING3
8295 else if (fPostponeFail)
8296 {
8297 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8298 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8299 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8300 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8301 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8302 return iemSetPassUpStatus(pVCpu, rcStrict);
8303 }
8304# endif
8305 else
8306 {
8307 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8308 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8309 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8310 return rcStrict;
8311 }
8312 }
8313 }
8314 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8315 {
8316 if (!cbSecond)
8317 {
8318 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8319 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8320 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8321 }
8322 else
8323 {
8324 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8325 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8326 pbBuf + cbFirst,
8327 cbSecond,
8328 PGMACCESSORIGIN_IEM);
8329 if (rcStrict2 == VINF_SUCCESS)
8330 {
8331 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8332 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8333 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8334 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8335 }
8336 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8337 {
8338 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8339 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8340 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8341 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8342 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8343 }
8344# ifndef IN_RING3
8345 else if (fPostponeFail)
8346 {
8347 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8348 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8349 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8350 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8351 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8352 return iemSetPassUpStatus(pVCpu, rcStrict);
8353 }
8354# endif
8355 else
8356 {
8357 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8358 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8359 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8360 return rcStrict2;
8361 }
8362 }
8363 }
8364# ifndef IN_RING3
8365 else if (fPostponeFail)
8366 {
8367 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8368 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8369 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8370 if (!cbSecond)
8371 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8372 else
8373 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8374 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8375 return iemSetPassUpStatus(pVCpu, rcStrict);
8376 }
8377# endif
8378 else
8379 {
8380 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8381 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8382 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8383 return rcStrict;
8384 }
8385 }
8386 else
8387 {
8388 /*
8389 * No access handlers, much simpler.
8390 */
8391 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8392 if (RT_SUCCESS(rc))
8393 {
8394 if (cbSecond)
8395 {
8396 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8397 if (RT_SUCCESS(rc))
8398 { /* likely */ }
8399 else
8400 {
8401 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8402 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8403 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8404 return rc;
8405 }
8406 }
8407 }
8408 else
8409 {
8410 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8411 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8412 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8413 return rc;
8414 }
8415 }
8416 }
8417#endif
8418
8419#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8420 /*
8421 * Record the write(s).
8422 */
8423 if (!pVCpu->iem.s.fNoRem)
8424 {
8425 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8426 if (pEvtRec)
8427 {
8428 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8429 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
8430 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8431 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
8432 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
8433 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8434 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8435 }
8436 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8437 {
8438 pEvtRec = iemVerifyAllocRecord(pVCpu);
8439 if (pEvtRec)
8440 {
8441 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8442 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
8443 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8444 memcpy(pEvtRec->u.RamWrite.ab,
8445 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
8446 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
8447 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8448 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8449 }
8450 }
8451 }
8452#endif
8453#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
8454 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8455 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8456 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8457 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8458 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8459 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8460
8461 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8462 g_cbIemWrote = cbWrote;
8463 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8464#endif
8465
8466 /*
8467 * Free the mapping entry.
8468 */
8469 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8470 Assert(pVCpu->iem.s.cActiveMappings != 0);
8471 pVCpu->iem.s.cActiveMappings--;
8472 return VINF_SUCCESS;
8473}
8474
8475
8476/**
8477 * iemMemMap worker that deals with a request crossing pages.
8478 */
8479IEM_STATIC VBOXSTRICTRC
8480iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8481{
8482 /*
8483 * Do the address translations.
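 * The first call translates the start of the access; the second translates its
 * last byte and the result is then masked down to the start of the following page.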
8484 */
8485 RTGCPHYS GCPhysFirst;
8486 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8487 if (rcStrict != VINF_SUCCESS)
8488 return rcStrict;
8489
8490 RTGCPHYS GCPhysSecond;
8491 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8492 fAccess, &GCPhysSecond);
8493 if (rcStrict != VINF_SUCCESS)
8494 return rcStrict;
8495 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8496
8497 PVM pVM = pVCpu->CTX_SUFF(pVM);
8498#ifdef IEM_VERIFICATION_MODE_FULL
8499 /*
8500 * Detect problematic memory when verifying so we can select
8501 * the right execution engine. (TLB: Redo this.)
8502 */
8503 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8504 {
8505 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8506 if (RT_SUCCESS(rc2))
8507 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8508 if (RT_FAILURE(rc2))
8509 pVCpu->iem.s.fProblematicMemory = true;
8510 }
8511#endif
8512
8513
8514 /*
8515 * Read in the current memory content if it's a read, execute or partial
8516 * write access.
8517 */
8518 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8519 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8520 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8521
8522 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8523 {
8524 if (!pVCpu->iem.s.fBypassHandlers)
8525 {
8526 /*
8527 * Must carefully deal with access handler status codes here,
8528 * makes the code a bit bloated.
8529 */
8530 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8531 if (rcStrict == VINF_SUCCESS)
8532 {
8533 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8534 if (rcStrict == VINF_SUCCESS)
8535 { /*likely */ }
8536 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8537 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8538 else
8539 {
8540 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8541 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8542 return rcStrict;
8543 }
8544 }
8545 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8546 {
8547 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8548 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8549 {
8550 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8551 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8552 }
8553 else
8554 {
8555 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8556 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8557 return rcStrict2;
8558 }
8559 }
8560 else
8561 {
8562 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8563 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8564 return rcStrict;
8565 }
8566 }
8567 else
8568 {
8569 /*
8570 * No informational status codes here, much more straightforward.
8571 */
8572 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8573 if (RT_SUCCESS(rc))
8574 {
8575 Assert(rc == VINF_SUCCESS);
8576 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8577 if (RT_SUCCESS(rc))
8578 Assert(rc == VINF_SUCCESS);
8579 else
8580 {
8581 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8582 return rc;
8583 }
8584 }
8585 else
8586 {
8587 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8588 return rc;
8589 }
8590 }
8591
8592#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8593 if ( !pVCpu->iem.s.fNoRem
8594 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8595 {
8596 /*
8597 * Record the reads.
8598 */
8599 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8600 if (pEvtRec)
8601 {
8602 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8603 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8604 pEvtRec->u.RamRead.cb = cbFirstPage;
8605 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8606 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8607 }
8608 pEvtRec = iemVerifyAllocRecord(pVCpu);
8609 if (pEvtRec)
8610 {
8611 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8612 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8613 pEvtRec->u.RamRead.cb = cbSecondPage;
8614 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8615 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8616 }
8617 }
8618#endif
8619 }
8620#ifdef VBOX_STRICT
8621 else
8622 memset(pbBuf, 0xcc, cbMem);
8623 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8624 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8625#endif
8626
8627 /*
8628 * Commit the bounce buffer entry.
8629 */
8630 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8631 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8632 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8633 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8634 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8635 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8636 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8637 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8638 pVCpu->iem.s.cActiveMappings++;
8639
8640 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8641 *ppvMem = pbBuf;
8642 return VINF_SUCCESS;
8643}
8644
8645
8646/**
8647 * iemMemMap worker that deals with iemMemPageMap failures.
8648 */
8649IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8650 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8651{
8652 /*
8653 * Filter out conditions we can handle and the ones which shouldn't happen.
8654 */
8655 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8656 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8657 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8658 {
8659 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8660 return rcMap;
8661 }
8662 pVCpu->iem.s.cPotentialExits++;
8663
8664 /*
8665 * Read in the current memory content if it's a read, execute or partial
8666 * write access.
8667 */
8668 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8669 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8670 {
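 /* Unassigned memory reads as all 0xff; otherwise read the current content into the bounce buffer. */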
8671 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8672 memset(pbBuf, 0xff, cbMem);
8673 else
8674 {
8675 int rc;
8676 if (!pVCpu->iem.s.fBypassHandlers)
8677 {
8678 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8679 if (rcStrict == VINF_SUCCESS)
8680 { /* nothing */ }
8681 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8682 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8683 else
8684 {
8685 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8686 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8687 return rcStrict;
8688 }
8689 }
8690 else
8691 {
8692 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8693 if (RT_SUCCESS(rc))
8694 { /* likely */ }
8695 else
8696 {
8697 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8698 GCPhysFirst, rc));
8699 return rc;
8700 }
8701 }
8702 }
8703
8704#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8705 if ( !pVCpu->iem.s.fNoRem
8706 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8707 {
8708 /*
8709 * Record the read.
8710 */
8711 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8712 if (pEvtRec)
8713 {
8714 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8715 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8716 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8717 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8718 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8719 }
8720 }
8721#endif
8722 }
8723#ifdef VBOX_STRICT
8724 else
8725 memset(pbBuf, 0xcc, cbMem);
8728 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8729 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8730#endif
8731
8732 /*
8733 * Commit the bounce buffer entry.
8734 */
8735 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8736 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8737 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8738 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8739 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8740 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8741 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8742 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8743 pVCpu->iem.s.cActiveMappings++;
8744
8745 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8746 *ppvMem = pbBuf;
8747 return VINF_SUCCESS;
8748}
8749
8750
8751
8752/**
8753 * Maps the specified guest memory for the given kind of access.
8754 *
8755 * This may be using bounce buffering of the memory if it's crossing a page
8756 * boundary or if there is an access handler installed for any of it. Because
8757 * of lock prefix guarantees, we're in for some extra clutter when this
8758 * happens.
8759 *
8760 * This may raise a \#GP, \#SS, \#PF or \#AC.
8761 *
8762 * @returns VBox strict status code.
8763 *
8764 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8765 * @param ppvMem Where to return the pointer to the mapped
8766 * memory.
8767 * @param cbMem The number of bytes to map. This is usually 1,
8768 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8769 * string operations it can be up to a page.
8770 * @param iSegReg The index of the segment register to use for
8771 * this access. The base and limits are checked.
8772 * Use UINT8_MAX to indicate that no segmentation
8773 * is required (for IDT, GDT and LDT accesses).
8774 * @param GCPtrMem The address of the guest memory.
8775 * @param fAccess How the memory is being accessed. The
8776 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8777 * how to map the memory, while the
8778 * IEM_ACCESS_WHAT_XXX bit is used when raising
8779 * exceptions.
8780 */
8781IEM_STATIC VBOXSTRICTRC
8782iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8783{
8784 /*
8785 * Check the input and figure out which mapping entry to use.
8786 */
8787 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8788 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8789 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8790
8791 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8792 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8793 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8794 {
8795 iMemMap = iemMemMapFindFree(pVCpu);
8796 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8797 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8798 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8799 pVCpu->iem.s.aMemMappings[2].fAccess),
8800 VERR_IEM_IPE_9);
8801 }
8802
8803 /*
8804 * Map the memory, checking that we can actually access it. If something
8805 * slightly complicated happens, fall back on bounce buffering.
8806 */
8807 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8808 if (rcStrict != VINF_SUCCESS)
8809 return rcStrict;
8810
8811 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8812 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8813
8814 RTGCPHYS GCPhysFirst;
8815 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8816 if (rcStrict != VINF_SUCCESS)
8817 return rcStrict;
8818
8819 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8820 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8821 if (fAccess & IEM_ACCESS_TYPE_READ)
8822 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8823
8824 void *pvMem;
8825 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8826 if (rcStrict != VINF_SUCCESS)
8827 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8828
8829 /*
8830 * Fill in the mapping table entry.
8831 */
8832 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8833 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8834 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8835 pVCpu->iem.s.cActiveMappings++;
8836
8837 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8838 *ppvMem = pvMem;
8839 return VINF_SUCCESS;
8840}
8841
8842
8843/**
8844 * Commits the guest memory if bounce buffered and unmaps it.
8845 *
8846 * @returns Strict VBox status code.
8847 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8848 * @param pvMem The mapping.
8849 * @param fAccess The kind of access.
8850 */
8851IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8852{
8853 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8854 AssertReturn(iMemMap >= 0, iMemMap);
8855
8856 /* If it's bounce buffered, we may need to write back the buffer. */
8857 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8858 {
8859 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8860 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8861 }
8862 /* Otherwise unlock it. */
8863 else
8864 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8865
8866 /* Free the entry. */
8867 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8868 Assert(pVCpu->iem.s.cActiveMappings != 0);
8869 pVCpu->iem.s.cActiveMappings--;
8870 return VINF_SUCCESS;
8871}
8872
8873#ifdef IEM_WITH_SETJMP
8874
8875/**
8876 * Maps the specified guest memory for the given kind of access, longjmp on
8877 * error.
8878 *
8879 * This may be using bounce buffering of the memory if it's crossing a page
8880 * boundary or if there is an access handler installed for any of it. Because
8881 * of lock prefix guarantees, we're in for some extra clutter when this
8882 * happens.
8883 *
8884 * This may raise a \#GP, \#SS, \#PF or \#AC.
8885 *
8886 * @returns Pointer to the mapped memory.
8887 *
8888 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8889 * @param cbMem The number of bytes to map. This is usually 1,
8890 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8891 * string operations it can be up to a page.
8892 * @param iSegReg The index of the segment register to use for
8893 * this access. The base and limits are checked.
8894 * Use UINT8_MAX to indicate that no segmentation
8895 * is required (for IDT, GDT and LDT accesses).
8896 * @param GCPtrMem The address of the guest memory.
8897 * @param fAccess How the memory is being accessed. The
8898 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8899 * how to map the memory, while the
8900 * IEM_ACCESS_WHAT_XXX bit is used when raising
8901 * exceptions.
8902 */
8903IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8904{
8905 /*
8906 * Check the input and figure out which mapping entry to use.
8907 */
8908 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8909 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8910 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8911
8912 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8913 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8914 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8915 {
8916 iMemMap = iemMemMapFindFree(pVCpu);
8917 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8918 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8919 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8920 pVCpu->iem.s.aMemMappings[2].fAccess),
8921 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8922 }
8923
8924 /*
8925 * Map the memory, checking that we can actually access it. If something
8926 * slightly complicated happens, fall back on bounce buffering.
8927 */
8928 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8929 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8930 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8931
8932 /* Crossing a page boundary? */
8933 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8934 { /* No (likely). */ }
8935 else
8936 {
8937 void *pvMem;
8938 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8939 if (rcStrict == VINF_SUCCESS)
8940 return pvMem;
8941 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8942 }
8943
8944 RTGCPHYS GCPhysFirst;
8945 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8946 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8947 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8948
8949 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8950 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8951 if (fAccess & IEM_ACCESS_TYPE_READ)
8952 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8953
8954 void *pvMem;
8955 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8956 if (rcStrict == VINF_SUCCESS)
8957 { /* likely */ }
8958 else
8959 {
8960 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8961 if (rcStrict == VINF_SUCCESS)
8962 return pvMem;
8963 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8964 }
8965
8966 /*
8967 * Fill in the mapping table entry.
8968 */
8969 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8970 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8971 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8972 pVCpu->iem.s.cActiveMappings++;
8973
8974 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8975 return pvMem;
8976}
8977
8978
8979/**
8980 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8981 *
8982 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8983 * @param pvMem The mapping.
8984 * @param fAccess The kind of access.
8985 */
8986IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8987{
8988 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8989 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8990
8991 /* If it's bounce buffered, we may need to write back the buffer. */
8992 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8993 {
8994 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8995 {
8996 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8997 if (rcStrict == VINF_SUCCESS)
8998 return;
8999 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9000 }
9001 }
9002 /* Otherwise unlock it. */
9003 else
9004 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9005
9006 /* Free the entry. */
9007 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9008 Assert(pVCpu->iem.s.cActiveMappings != 0);
9009 pVCpu->iem.s.cActiveMappings--;
9010}
9011
9012#endif
9013
9014#ifndef IN_RING3
9015/**
9016 * Commits the guest memory if bounce buffered and unmaps it, if any bounce
9017 * buffer part shows trouble it will be postponed to ring-3 (sets FF and stuff).
9018 *
9019 * Allows the instruction to be completed and retired, while the IEM user will
9020 * return to ring-3 immediately afterwards and do the postponed writes there.
9021 *
9022 * @returns VBox status code (no strict statuses). Caller must check
9023 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9024 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9025 * @param pvMem The mapping.
9026 * @param fAccess The kind of access.
9027 */
9028IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9029{
9030 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9031 AssertReturn(iMemMap >= 0, iMemMap);
9032
9033 /* If it's bounce buffered, we may need to write back the buffer. */
9034 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9035 {
9036 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9037 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9038 }
9039 /* Otherwise unlock it. */
9040 else
9041 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9042
9043 /* Free the entry. */
9044 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9045 Assert(pVCpu->iem.s.cActiveMappings != 0);
9046 pVCpu->iem.s.cActiveMappings--;
9047 return VINF_SUCCESS;
9048}
9049#endif
9050
9051
9052/**
9053 * Rolls back mappings, releasing page locks and such.
9054 *
9055 * The caller shall only call this after checking cActiveMappings.
9056 *
9058 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9059 */
9060IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9061{
9062 Assert(pVCpu->iem.s.cActiveMappings > 0);
9063
9064 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9065 while (iMemMap-- > 0)
9066 {
9067 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9068 if (fAccess != IEM_ACCESS_INVALID)
9069 {
9070 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9071 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
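 /* Only directly mapped entries hold a PGM page mapping lock; bounce buffered ones have nothing to release. */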
9072 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9073 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9074 Assert(pVCpu->iem.s.cActiveMappings > 0);
9075 pVCpu->iem.s.cActiveMappings--;
9076 }
9077 }
9078}
9079
9080
9081/**
9082 * Fetches a data byte.
9083 *
9084 * @returns Strict VBox status code.
9085 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9086 * @param pu8Dst Where to return the byte.
9087 * @param iSegReg The index of the segment register to use for
9088 * this access. The base and limits are checked.
9089 * @param GCPtrMem The address of the guest memory.
9090 */
9091IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9092{
9093 /* The lazy approach for now... */
9094 uint8_t const *pu8Src;
9095 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9096 if (rc == VINF_SUCCESS)
9097 {
9098 *pu8Dst = *pu8Src;
9099 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9100 }
9101 return rc;
9102}
9103
9104
9105#ifdef IEM_WITH_SETJMP
9106/**
9107 * Fetches a data byte, longjmp on error.
9108 *
9109 * @returns The byte.
9110 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9111 * @param iSegReg The index of the segment register to use for
9112 * this access. The base and limits are checked.
9113 * @param GCPtrMem The address of the guest memory.
9114 */
9115DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9116{
9117 /* The lazy approach for now... */
9118 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9119 uint8_t const bRet = *pu8Src;
9120 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9121 return bRet;
9122}
9123#endif /* IEM_WITH_SETJMP */
9124
9125
9126/**
9127 * Fetches a data word.
9128 *
9129 * @returns Strict VBox status code.
9130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9131 * @param pu16Dst Where to return the word.
9132 * @param iSegReg The index of the segment register to use for
9133 * this access. The base and limits are checked.
9134 * @param GCPtrMem The address of the guest memory.
9135 */
9136IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9137{
9138 /* The lazy approach for now... */
9139 uint16_t const *pu16Src;
9140 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9141 if (rc == VINF_SUCCESS)
9142 {
9143 *pu16Dst = *pu16Src;
9144 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9145 }
9146 return rc;
9147}
9148
9149
9150#ifdef IEM_WITH_SETJMP
9151/**
9152 * Fetches a data word, longjmp on error.
9153 *
9154 * @returns The word
9155 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9156 * @param iSegReg The index of the segment register to use for
9157 * this access. The base and limits are checked.
9158 * @param GCPtrMem The address of the guest memory.
9159 */
9160DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9161{
9162 /* The lazy approach for now... */
9163 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9164 uint16_t const u16Ret = *pu16Src;
9165 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9166 return u16Ret;
9167}
9168#endif
9169
9170
9171/**
9172 * Fetches a data dword.
9173 *
9174 * @returns Strict VBox status code.
9175 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9176 * @param pu32Dst Where to return the dword.
9177 * @param iSegReg The index of the segment register to use for
9178 * this access. The base and limits are checked.
9179 * @param GCPtrMem The address of the guest memory.
9180 */
9181IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9182{
9183 /* The lazy approach for now... */
9184 uint32_t const *pu32Src;
9185 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9186 if (rc == VINF_SUCCESS)
9187 {
9188 *pu32Dst = *pu32Src;
9189 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9190 }
9191 return rc;
9192}
9193
9194
9195#ifdef IEM_WITH_SETJMP
9196
9197IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9198{
9199 Assert(cbMem >= 1);
9200 Assert(iSegReg < X86_SREG_COUNT);
9201
9202 /*
9203 * 64-bit mode is simpler.
9204 */
9205 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9206 {
9207 if (iSegReg >= X86_SREG_FS)
9208 {
9209 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9210 GCPtrMem += pSel->u64Base;
9211 }
9212
9213 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9214 return GCPtrMem;
9215 }
9216 /*
9217 * 16-bit and 32-bit segmentation.
9218 */
9219 else
9220 {
9221 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
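 /* The present, unusable and type checks are folded into single attribute mask compares below. */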
9222 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9223 == X86DESCATTR_P /* data, expand up */
9224 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9225 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9226 {
9227 /* expand up */
9228 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9229 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9230 && GCPtrLast32 > (uint32_t)GCPtrMem))
9231 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9232 }
9233 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9234 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9235 {
9236 /* expand down */
9237 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9238 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9239 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9240 && GCPtrLast32 > (uint32_t)GCPtrMem))
9241 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9242 }
9243 else
9244 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9245 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9246 }
9247 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9248}
9249
9250
9251IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9252{
9253 Assert(cbMem >= 1);
9254 Assert(iSegReg < X86_SREG_COUNT);
9255
9256 /*
9257 * 64-bit mode is simpler.
9258 */
9259 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9260 {
9261 if (iSegReg >= X86_SREG_FS)
9262 {
9263 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9264 GCPtrMem += pSel->u64Base;
9265 }
9266
9267 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9268 return GCPtrMem;
9269 }
9270 /*
9271 * 16-bit and 32-bit segmentation.
9272 */
9273 else
9274 {
9275 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9276 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9277 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9278 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9279 {
9280 /* expand up */
9281 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9282 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9283 && GCPtrLast32 > (uint32_t)GCPtrMem))
9284 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9285 }
9286 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9287 {
9288 /* expand down */
9289 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9290 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9291 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9292 && GCPtrLast32 > (uint32_t)GCPtrMem))
9293 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9294 }
9295 else
9296 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9297 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9298 }
9299 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9300}
9301
9302
9303/**
9304 * Fetches a data dword, longjmp on error, fallback/safe version.
9305 *
9306 * @returns The dword
9307 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9308 * @param iSegReg The index of the segment register to use for
9309 * this access. The base and limits are checked.
9310 * @param GCPtrMem The address of the guest memory.
9311 */
9312IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9313{
9314 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9315 uint32_t const u32Ret = *pu32Src;
9316 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9317 return u32Ret;
9318}
9319
9320
9321/**
9322 * Fetches a data dword, longjmp on error.
9323 *
9324 * @returns The dword
9325 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9326 * @param iSegReg The index of the segment register to use for
9327 * this access. The base and limits are checked.
9328 * @param GCPtrMem The address of the guest memory.
9329 */
9330DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9331{
9332# ifdef IEM_WITH_DATA_TLB
9333 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9334 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9335 {
9336 /// @todo more later.
9337 }
9338
9339 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9340# else
9341 /* The lazy approach. */
9342 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9343 uint32_t const u32Ret = *pu32Src;
9344 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9345 return u32Ret;
9346# endif
9347}
9348#endif
9349
9350
9351#ifdef SOME_UNUSED_FUNCTION
9352/**
9353 * Fetches a data dword and sign extends it to a qword.
9354 *
9355 * @returns Strict VBox status code.
9356 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9357 * @param pu64Dst Where to return the sign extended value.
9358 * @param iSegReg The index of the segment register to use for
9359 * this access. The base and limits are checked.
9360 * @param GCPtrMem The address of the guest memory.
9361 */
9362IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9363{
9364 /* The lazy approach for now... */
9365 int32_t const *pi32Src;
9366 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9367 if (rc == VINF_SUCCESS)
9368 {
9369 *pu64Dst = *pi32Src;
9370 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9371 }
9372#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9373 else
9374 *pu64Dst = 0;
9375#endif
9376 return rc;
9377}
9378#endif
9379
9380
9381/**
9382 * Fetches a data qword.
9383 *
9384 * @returns Strict VBox status code.
9385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9386 * @param pu64Dst Where to return the qword.
9387 * @param iSegReg The index of the segment register to use for
9388 * this access. The base and limits are checked.
9389 * @param GCPtrMem The address of the guest memory.
9390 */
9391IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9392{
9393 /* The lazy approach for now... */
9394 uint64_t const *pu64Src;
9395 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9396 if (rc == VINF_SUCCESS)
9397 {
9398 *pu64Dst = *pu64Src;
9399 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9400 }
9401 return rc;
9402}
9403
9404
9405#ifdef IEM_WITH_SETJMP
9406/**
9407 * Fetches a data qword, longjmp on error.
9408 *
9409 * @returns The qword.
9410 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9411 * @param iSegReg The index of the segment register to use for
9412 * this access. The base and limits are checked.
9413 * @param GCPtrMem The address of the guest memory.
9414 */
9415DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9416{
9417 /* The lazy approach for now... */
9418 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9419 uint64_t const u64Ret = *pu64Src;
9420 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9421 return u64Ret;
9422}
9423#endif
9424
9425
9426/**
9427 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9428 *
9429 * @returns Strict VBox status code.
9430 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9431 * @param pu64Dst Where to return the qword.
9432 * @param iSegReg The index of the segment register to use for
9433 * this access. The base and limits are checked.
9434 * @param GCPtrMem The address of the guest memory.
9435 */
9436IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9437{
9438 /* The lazy approach for now... */
9439 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9440 if (RT_UNLIKELY(GCPtrMem & 15))
9441 return iemRaiseGeneralProtectionFault0(pVCpu);
9442
9443 uint64_t const *pu64Src;
9444 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9445 if (rc == VINF_SUCCESS)
9446 {
9447 *pu64Dst = *pu64Src;
9448 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9449 }
9450 return rc;
9451}
9452
9453
9454#ifdef IEM_WITH_SETJMP
9455/**
9456 * Fetches a data qword, aligned at a 16-byte boundary (for SSE), longjmp on error.
9457 *
9458 * @returns The qword.
9459 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9460 * @param iSegReg The index of the segment register to use for
9461 * this access. The base and limits are checked.
9462 * @param GCPtrMem The address of the guest memory.
9463 */
9464DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9465{
9466 /* The lazy approach for now... */
9467 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9468 if (RT_LIKELY(!(GCPtrMem & 15)))
9469 {
9470 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9471 uint64_t const u64Ret = *pu64Src;
9472 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9473 return u64Ret;
9474 }
9475
9476 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9477 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9478}
9479#endif
9480
9481
9482/**
9483 * Fetches a data tword.
9484 *
9485 * @returns Strict VBox status code.
9486 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9487 * @param pr80Dst Where to return the tword.
9488 * @param iSegReg The index of the segment register to use for
9489 * this access. The base and limits are checked.
9490 * @param GCPtrMem The address of the guest memory.
9491 */
9492IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9493{
9494 /* The lazy approach for now... */
9495 PCRTFLOAT80U pr80Src;
9496 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9497 if (rc == VINF_SUCCESS)
9498 {
9499 *pr80Dst = *pr80Src;
9500 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9501 }
9502 return rc;
9503}
9504
9505
9506#ifdef IEM_WITH_SETJMP
9507/**
9508 * Fetches a data tword, longjmp on error.
9509 *
9510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9511 * @param pr80Dst Where to return the tword.
9512 * @param iSegReg The index of the segment register to use for
9513 * this access. The base and limits are checked.
9514 * @param GCPtrMem The address of the guest memory.
9515 */
9516DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9517{
9518 /* The lazy approach for now... */
9519 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9520 *pr80Dst = *pr80Src;
9521 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9522}
9523#endif
9524
9525
9526/**
9527 * Fetches a data dqword (double qword), generally SSE related.
9528 *
9529 * @returns Strict VBox status code.
9530 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9531 * @param pu128Dst Where to return the dqword.
9532 * @param iSegReg The index of the segment register to use for
9533 * this access. The base and limits are checked.
9534 * @param GCPtrMem The address of the guest memory.
9535 */
9536IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9537{
9538 /* The lazy approach for now... */
9539 PCRTUINT128U pu128Src;
9540 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9541 if (rc == VINF_SUCCESS)
9542 {
9543 pu128Dst->au64[0] = pu128Src->au64[0];
9544 pu128Dst->au64[1] = pu128Src->au64[1];
9545 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9546 }
9547 return rc;
9548}
9549
9550
9551#ifdef IEM_WITH_SETJMP
9552/**
9553 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9554 *
9555 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9556 * @param pu128Dst Where to return the dqword.
9557 * @param iSegReg The index of the segment register to use for
9558 * this access. The base and limits are checked.
9559 * @param GCPtrMem The address of the guest memory.
9560 */
9561IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9562{
9563 /* The lazy approach for now... */
9564 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9565 pu128Dst->au64[0] = pu128Src->au64[0];
9566 pu128Dst->au64[1] = pu128Src->au64[1];
9567 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9568}
9569#endif
9570
9571
9572/**
9573 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9574 * related.
9575 *
9576 * Raises \#GP(0) if not aligned.
9577 *
9578 * @returns Strict VBox status code.
9579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9580 * @param pu128Dst Where to return the dqword.
9581 * @param iSegReg The index of the segment register to use for
9582 * this access. The base and limits are checked.
9583 * @param GCPtrMem The address of the guest memory.
9584 */
9585IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9586{
9587 /* The lazy approach for now... */
9588 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9589 if ( (GCPtrMem & 15)
9590 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9591 return iemRaiseGeneralProtectionFault0(pVCpu);
9592
9593 PCRTUINT128U pu128Src;
9594 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9595 if (rc == VINF_SUCCESS)
9596 {
9597 pu128Dst->au64[0] = pu128Src->au64[0];
9598 pu128Dst->au64[1] = pu128Src->au64[1];
9599 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9600 }
9601 return rc;
9602}
9603
9604
9605#ifdef IEM_WITH_SETJMP
9606/**
9607 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9608 * related, longjmp on error.
9609 *
9610 * Raises \#GP(0) if not aligned.
9611 *
9612 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9613 * @param pu128Dst Where to return the dqword.
9614 * @param iSegReg The index of the segment register to use for
9615 * this access. The base and limits are checked.
9616 * @param GCPtrMem The address of the guest memory.
9617 */
9618DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9619{
9620 /* The lazy approach for now... */
9621 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9622 if ( (GCPtrMem & 15) == 0
9623 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9624 {
9625 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9626 pu128Dst->au64[0] = pu128Src->au64[0];
9627 pu128Dst->au64[1] = pu128Src->au64[1];
9628 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9629 return;
9630 }
9631
9632 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9633 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9634}
9635#endif
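
/*
 * Illustrative sketch (hypothetical helper, not built): the alignment rule shared
 * by the SSE-aligned fetch and store helpers in this file. A 16-byte data access
 * raises #GP(0) unless the address is 16-byte aligned or MXCSR.MM (the misaligned
 * exception mask bit) is set; see also the pending todo about checking this after
 * applying seg.u64Base.
 */
#if 0
DECLINLINE(bool) iemMemIsSseAlignmentOk(PVMCPU pVCpu, RTGCPTR GCPtrMem)
{
    /* Same predicate as the inline checks above, just written out once. */
    return (GCPtrMem & 15) == 0
        || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM);
}
#endif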
9636
9637
9638/**
9639 * Fetches a data oword (octo word), generally AVX related.
9640 *
9641 * @returns Strict VBox status code.
9642 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9643 * @param pu256Dst Where to return the oword.
9644 * @param iSegReg The index of the segment register to use for
9645 * this access. The base and limits are checked.
9646 * @param GCPtrMem The address of the guest memory.
9647 */
9648IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9649{
9650 /* The lazy approach for now... */
9651 PCRTUINT256U pu256Src;
9652 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9653 if (rc == VINF_SUCCESS)
9654 {
9655 pu256Dst->au64[0] = pu256Src->au64[0];
9656 pu256Dst->au64[1] = pu256Src->au64[1];
9657 pu256Dst->au64[2] = pu256Src->au64[2];
9658 pu256Dst->au64[3] = pu256Src->au64[3];
9659 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9660 }
9661 return rc;
9662}
9663
9664
9665#ifdef IEM_WITH_SETJMP
9666/**
9667 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9668 *
9669 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9670 * @param pu256Dst Where to return the oword.
9671 * @param iSegReg The index of the segment register to use for
9672 * this access. The base and limits are checked.
9673 * @param GCPtrMem The address of the guest memory.
9674 */
9675IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9676{
9677 /* The lazy approach for now... */
9678 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9679 pu256Dst->au64[0] = pu256Src->au64[0];
9680 pu256Dst->au64[1] = pu256Src->au64[1];
9681 pu256Dst->au64[2] = pu256Src->au64[2];
9682 pu256Dst->au64[3] = pu256Src->au64[3];
9683 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9684}
9685#endif
9686
9687
9688/**
9689 * Fetches a data oword (octo word) at an aligned address, generally AVX
9690 * related.
9691 *
9692 * Raises \#GP(0) if not aligned.
9693 *
9694 * @returns Strict VBox status code.
9695 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9696 * @param pu256Dst Where to return the oword.
9697 * @param iSegReg The index of the segment register to use for
9698 * this access. The base and limits are checked.
9699 * @param GCPtrMem The address of the guest memory.
9700 */
9701IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9702{
9703 /* The lazy approach for now... */
9704 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9705 if (GCPtrMem & 31)
9706 return iemRaiseGeneralProtectionFault0(pVCpu);
9707
9708 PCRTUINT256U pu256Src;
9709 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9710 if (rc == VINF_SUCCESS)
9711 {
9712 pu256Dst->au64[0] = pu256Src->au64[0];
9713 pu256Dst->au64[1] = pu256Src->au64[1];
9714 pu256Dst->au64[2] = pu256Src->au64[2];
9715 pu256Dst->au64[3] = pu256Src->au64[3];
9716 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9717 }
9718 return rc;
9719}
9720
9721
9722#ifdef IEM_WITH_SETJMP
9723/**
9724 * Fetches a data oword (octo word) at an aligned address, generally AVX
9725 * related, longjmp on error.
9726 *
9727 * Raises \#GP(0) if not aligned.
9728 *
9729 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9730 * @param pu256Dst Where to return the oword.
9731 * @param iSegReg The index of the segment register to use for
9732 * this access. The base and limits are checked.
9733 * @param GCPtrMem The address of the guest memory.
9734 */
9735DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9736{
9737 /* The lazy approach for now... */
9738 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9739 if ((GCPtrMem & 31) == 0)
9740 {
9741 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9742 pu256Dst->au64[0] = pu256Src->au64[0];
9743 pu256Dst->au64[1] = pu256Src->au64[1];
9744 pu256Dst->au64[2] = pu256Src->au64[2];
9745 pu256Dst->au64[3] = pu256Src->au64[3];
9746 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9747 return;
9748 }
9749
9750 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9751 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9752}
9753#endif
9754
9755
9756
9757/**
9758 * Fetches a descriptor register (lgdt, lidt).
9759 *
9760 * @returns Strict VBox status code.
9761 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9762 * @param pcbLimit Where to return the limit.
9763 * @param pGCPtrBase Where to return the base.
9764 * @param iSegReg The index of the segment register to use for
9765 * this access. The base and limits are checked.
9766 * @param GCPtrMem The address of the guest memory.
9767 * @param enmOpSize The effective operand size.
9768 */
9769IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9770 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9771{
9772 /*
9773 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9774 * little special:
9775 * - The two reads are done separately.
9776 * - The operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9777 * - We suspect the 386 to actually commit the limit before the base in
9778 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9779 * don't try to emulate this eccentric behavior, because it's not well
9780 * enough understood and rather hard to trigger.
9781 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9782 */
9783 VBOXSTRICTRC rcStrict;
9784 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9785 {
9786 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9787 if (rcStrict == VINF_SUCCESS)
9788 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9789 }
9790 else
9791 {
9792 uint32_t uTmp = 0; /* (Silences a Visual C++ 'maybe used uninitialized' warning.) */
9793 if (enmOpSize == IEMMODE_32BIT)
9794 {
9795 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9796 {
9797 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9798 if (rcStrict == VINF_SUCCESS)
9799 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9800 }
9801 else
9802 {
9803 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9804 if (rcStrict == VINF_SUCCESS)
9805 {
9806 *pcbLimit = (uint16_t)uTmp;
9807 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9808 }
9809 }
9810 if (rcStrict == VINF_SUCCESS)
9811 *pGCPtrBase = uTmp;
9812 }
9813 else
9814 {
9815 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9816 if (rcStrict == VINF_SUCCESS)
9817 {
9818 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9819 if (rcStrict == VINF_SUCCESS)
9820 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9821 }
9822 }
9823 }
9824 return rcStrict;
9825}
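
/*
 * Illustrative sketch (hypothetical struct, not built): the pseudo-descriptor
 * image that iemMemFetchDataXdtr reads from GCPtrMem. The limit always occupies
 * the first two bytes; the base follows at offset 2 and is masked to 24 bits for
 * 16-bit operands, read as 32 bits for 32-bit operands, and read as 64 bits in
 * long mode, matching the fetches above.
 */
#if 0
# pragma pack(1)
typedef struct XDTRPSEUDODESC
{
    uint16_t cbLimit;   /* offset 0: the 16-bit limit, always read as a word */
    uint64_t uBase;     /* offset 2: the base; how many of these bytes are consumed
                           depends on the CPU mode and the effective operand size */
} XDTRPSEUDODESC;
# pragma pack()
#endif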
9826
9827
9828
9829/**
9830 * Stores a data byte.
9831 *
9832 * @returns Strict VBox status code.
9833 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9834 * @param iSegReg The index of the segment register to use for
9835 * this access. The base and limits are checked.
9836 * @param GCPtrMem The address of the guest memory.
9837 * @param u8Value The value to store.
9838 */
9839IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9840{
9841 /* The lazy approach for now... */
9842 uint8_t *pu8Dst;
9843 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9844 if (rc == VINF_SUCCESS)
9845 {
9846 *pu8Dst = u8Value;
9847 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9848 }
9849 return rc;
9850}
9851
9852
9853#ifdef IEM_WITH_SETJMP
9854/**
9855 * Stores a data byte, longjmp on error.
9856 *
9857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9858 * @param iSegReg The index of the segment register to use for
9859 * this access. The base and limits are checked.
9860 * @param GCPtrMem The address of the guest memory.
9861 * @param u8Value The value to store.
9862 */
9863IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9864{
9865 /* The lazy approach for now... */
9866 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9867 *pu8Dst = u8Value;
9868 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9869}
9870#endif
9871
9872
9873/**
9874 * Stores a data word.
9875 *
9876 * @returns Strict VBox status code.
9877 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9878 * @param iSegReg The index of the segment register to use for
9879 * this access. The base and limits are checked.
9880 * @param GCPtrMem The address of the guest memory.
9881 * @param u16Value The value to store.
9882 */
9883IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9884{
9885 /* The lazy approach for now... */
9886 uint16_t *pu16Dst;
9887 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9888 if (rc == VINF_SUCCESS)
9889 {
9890 *pu16Dst = u16Value;
9891 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9892 }
9893 return rc;
9894}
9895
9896
9897#ifdef IEM_WITH_SETJMP
9898/**
9899 * Stores a data word, longjmp on error.
9900 *
9901 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9902 * @param iSegReg The index of the segment register to use for
9903 * this access. The base and limits are checked.
9904 * @param GCPtrMem The address of the guest memory.
9905 * @param u16Value The value to store.
9906 */
9907IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9908{
9909 /* The lazy approach for now... */
9910 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9911 *pu16Dst = u16Value;
9912 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9913}
9914#endif
9915
9916
9917/**
9918 * Stores a data dword.
9919 *
9920 * @returns Strict VBox status code.
9921 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9922 * @param iSegReg The index of the segment register to use for
9923 * this access. The base and limits are checked.
9924 * @param GCPtrMem The address of the guest memory.
9925 * @param u32Value The value to store.
9926 */
9927IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9928{
9929 /* The lazy approach for now... */
9930 uint32_t *pu32Dst;
9931 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9932 if (rc == VINF_SUCCESS)
9933 {
9934 *pu32Dst = u32Value;
9935 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9936 }
9937 return rc;
9938}
9939
9940
9941#ifdef IEM_WITH_SETJMP
9942/**
9943 * Stores a data dword, longjmp on error.
9944 *
9945 * @returns Strict VBox status code.
9946 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9947 * @param iSegReg The index of the segment register to use for
9948 * this access. The base and limits are checked.
9949 * @param GCPtrMem The address of the guest memory.
9950 * @param u32Value The value to store.
9951 */
9952IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9953{
9954 /* The lazy approach for now... */
9955 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9956 *pu32Dst = u32Value;
9957 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9958}
9959#endif
9960
9961
9962/**
9963 * Stores a data qword.
9964 *
9965 * @returns Strict VBox status code.
9966 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9967 * @param iSegReg The index of the segment register to use for
9968 * this access. The base and limits are checked.
9969 * @param GCPtrMem The address of the guest memory.
9970 * @param u64Value The value to store.
9971 */
9972IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9973{
9974 /* The lazy approach for now... */
9975 uint64_t *pu64Dst;
9976 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9977 if (rc == VINF_SUCCESS)
9978 {
9979 *pu64Dst = u64Value;
9980 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9981 }
9982 return rc;
9983}
9984
9985
9986#ifdef IEM_WITH_SETJMP
9987/**
9988 * Stores a data qword, longjmp on error.
9989 *
9990 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9991 * @param iSegReg The index of the segment register to use for
9992 * this access. The base and limits are checked.
9993 * @param GCPtrMem The address of the guest memory.
9994 * @param u64Value The value to store.
9995 */
9996IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9997{
9998 /* The lazy approach for now... */
9999 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10000 *pu64Dst = u64Value;
10001 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10002}
10003#endif
10004
10005
10006/**
10007 * Stores a data dqword.
10008 *
10009 * @returns Strict VBox status code.
10010 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10011 * @param iSegReg The index of the segment register to use for
10012 * this access. The base and limits are checked.
10013 * @param GCPtrMem The address of the guest memory.
10014 * @param u128Value The value to store.
10015 */
10016IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10017{
10018 /* The lazy approach for now... */
10019 PRTUINT128U pu128Dst;
10020 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10021 if (rc == VINF_SUCCESS)
10022 {
10023 pu128Dst->au64[0] = u128Value.au64[0];
10024 pu128Dst->au64[1] = u128Value.au64[1];
10025 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10026 }
10027 return rc;
10028}
10029
10030
10031#ifdef IEM_WITH_SETJMP
10032/**
10033 * Stores a data dqword, longjmp on error.
10034 *
10035 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10036 * @param iSegReg The index of the segment register to use for
10037 * this access. The base and limits are checked.
10038 * @param GCPtrMem The address of the guest memory.
10039 * @param u128Value The value to store.
10040 */
10041IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10042{
10043 /* The lazy approach for now... */
10044 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10045 pu128Dst->au64[0] = u128Value.au64[0];
10046 pu128Dst->au64[1] = u128Value.au64[1];
10047 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10048}
10049#endif
10050
10051
10052/**
10053 * Stores a data dqword, SSE aligned.
10054 *
10055 * @returns Strict VBox status code.
10056 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10057 * @param iSegReg The index of the segment register to use for
10058 * this access. The base and limits are checked.
10059 * @param GCPtrMem The address of the guest memory.
10060 * @param u128Value The value to store.
10061 */
10062IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10063{
10064 /* The lazy approach for now... */
10065 if ( (GCPtrMem & 15)
10066 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10067 return iemRaiseGeneralProtectionFault0(pVCpu);
10068
10069 PRTUINT128U pu128Dst;
10070 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10071 if (rc == VINF_SUCCESS)
10072 {
10073 pu128Dst->au64[0] = u128Value.au64[0];
10074 pu128Dst->au64[1] = u128Value.au64[1];
10075 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10076 }
10077 return rc;
10078}
10079
10080
10081#ifdef IEM_WITH_SETJMP
10082/**
10083 * Stores a data dqword, SSE aligned, longjmp on error.
10084 *
10085 * @returns Strict VBox status code.
10086 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10087 * @param iSegReg The index of the segment register to use for
10088 * this access. The base and limits are checked.
10089 * @param GCPtrMem The address of the guest memory.
10090 * @param u128Value The value to store.
10091 */
10092DECL_NO_INLINE(IEM_STATIC, void)
10093iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10094{
10095 /* The lazy approach for now... */
10096 if ( (GCPtrMem & 15) == 0
10097 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10098 {
10099 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10100 pu128Dst->au64[0] = u128Value.au64[0];
10101 pu128Dst->au64[1] = u128Value.au64[1];
10102 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10103 return;
10104 }
10105
10106 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10107 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10108}
10109#endif
10110
10111
10112/**
10113 * Stores a data oword (octo word).
10114 *
10115 * @returns Strict VBox status code.
10116 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10117 * @param iSegReg The index of the segment register to use for
10118 * this access. The base and limits are checked.
10119 * @param GCPtrMem The address of the guest memory.
10120 * @param pu256Value Pointer to the value to store.
10121 */
10122IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10123{
10124 /* The lazy approach for now... */
10125 PRTUINT256U pu256Dst;
10126 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10127 if (rc == VINF_SUCCESS)
10128 {
10129 pu256Dst->au64[0] = pu256Value->au64[0];
10130 pu256Dst->au64[1] = pu256Value->au64[1];
10131 pu256Dst->au64[2] = pu256Value->au64[2];
10132 pu256Dst->au64[3] = pu256Value->au64[3];
10133 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10134 }
10135 return rc;
10136}
10137
10138
10139#ifdef IEM_WITH_SETJMP
10140/**
10141 * Stores a data oword (octo word), longjmp on error.
10142 *
10143 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10144 * @param iSegReg The index of the segment register to use for
10145 * this access. The base and limits are checked.
10146 * @param GCPtrMem The address of the guest memory.
10147 * @param pu256Value Pointer to the value to store.
10148 */
10149IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10150{
10151 /* The lazy approach for now... */
10152 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10153 pu256Dst->au64[0] = pu256Value->au64[0];
10154 pu256Dst->au64[1] = pu256Value->au64[1];
10155 pu256Dst->au64[2] = pu256Value->au64[2];
10156 pu256Dst->au64[3] = pu256Value->au64[3];
10157 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10158}
10159#endif
10160
10161
10162/**
10163 * Stores a data oword (octo word), AVX aligned.
10164 *
10165 * @returns Strict VBox status code.
10166 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10167 * @param iSegReg The index of the segment register to use for
10168 * this access. The base and limits are checked.
10169 * @param GCPtrMem The address of the guest memory.
10170 * @param pu256Value Pointer to the value to store.
10171 */
10172IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10173{
10174 /* The lazy approach for now... */
10175 if (GCPtrMem & 31)
10176 return iemRaiseGeneralProtectionFault0(pVCpu);
10177
10178 PRTUINT256U pu256Dst;
10179 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10180 if (rc == VINF_SUCCESS)
10181 {
10182 pu256Dst->au64[0] = pu256Value->au64[0];
10183 pu256Dst->au64[1] = pu256Value->au64[1];
10184 pu256Dst->au64[2] = pu256Value->au64[2];
10185 pu256Dst->au64[3] = pu256Value->au64[3];
10186 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10187 }
10188 return rc;
10189}
10190
10191
10192#ifdef IEM_WITH_SETJMP
10193/**
10194 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10195 *
10196 * @returns Strict VBox status code.
10197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10198 * @param iSegReg The index of the segment register to use for
10199 * this access. The base and limits are checked.
10200 * @param GCPtrMem The address of the guest memory.
10201 * @param pu256Value Pointer to the value to store.
10202 */
10203DECL_NO_INLINE(IEM_STATIC, void)
10204iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10205{
10206 /* The lazy approach for now... */
10207 if ((GCPtrMem & 31) == 0)
10208 {
10209 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10210 pu256Dst->au64[0] = pu256Value->au64[0];
10211 pu256Dst->au64[1] = pu256Value->au64[1];
10212 pu256Dst->au64[2] = pu256Value->au64[2];
10213 pu256Dst->au64[3] = pu256Value->au64[3];
10214 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10215 return;
10216 }
10217
10218 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10219 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10220}
10221#endif
10222
10223
10224/**
10225 * Stores a descriptor register (sgdt, sidt).
10226 *
10227 * @returns Strict VBox status code.
10228 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10229 * @param cbLimit The limit.
10230 * @param GCPtrBase The base address.
10231 * @param iSegReg The index of the segment register to use for
10232 * this access. The base and limits are checked.
10233 * @param GCPtrMem The address of the guest memory.
10234 */
10235IEM_STATIC VBOXSTRICTRC
10236iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10237{
10238 VBOXSTRICTRC rcStrict;
10239 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_READS))
10240 {
10241 Log(("sidt/sgdt: Guest intercept -> #VMEXIT\n"));
10242 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
10243 }
10244
10245 /*
10246 * The SIDT and SGDT instructions actually store the data using two
10247 * independent writes. The instructions do not respond to operand-size prefixes.
10248 */
10249 rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10250 if (rcStrict == VINF_SUCCESS)
10251 {
10252 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10253 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10254 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10255 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10256 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10257 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10258 else
10259 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10260 }
10261 return rcStrict;
10262}
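
/*
 * Worked example (hypothetical values and destination, not built): SGDT in
 * 16-bit code on a 286-or-older target CPU. The limit goes out first, then a
 * dword whose top byte is forced to 0xff, matching the IEMTARGETCPU_286 special
 * case above. GCPtrEffDst stands in for the caller's effective address.
 */
#if 0
    /* GDTR.limit = 0x03ff, GDTR.base = 0x00fe1000: */
    VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, UINT16_C(0x03ff), UINT64_C(0x00fe1000),
                                                X86_SREG_DS, GCPtrEffDst);
    /* On success, guest memory at GCPtrEffDst reads: ff 03 | 00 10 fe ff
       (little-endian limit, then the base stored as 0xfffe1000). */
#endif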
10263
10264
10265/**
10266 * Pushes a word onto the stack.
10267 *
10268 * @returns Strict VBox status code.
10269 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10270 * @param u16Value The value to push.
10271 */
10272IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10273{
10274 /* Decrement the stack pointer. */
10275 uint64_t uNewRsp;
10276 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10277 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
10278
10279 /* Write the word the lazy way. */
10280 uint16_t *pu16Dst;
10281 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10282 if (rc == VINF_SUCCESS)
10283 {
10284 *pu16Dst = u16Value;
10285 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10286 }
10287
10288 /* Commit the new RSP value unless an access handler made trouble. */
10289 if (rc == VINF_SUCCESS)
10290 pCtx->rsp = uNewRsp;
10291
10292 return rc;
10293}
10294
10295
10296/**
10297 * Pushes a dword onto the stack.
10298 *
10299 * @returns Strict VBox status code.
10300 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10301 * @param u32Value The value to push.
10302 */
10303IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10304{
10305 /* Decrement the stack pointer. */
10306 uint64_t uNewRsp;
10307 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10308 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10309
10310 /* Write the dword the lazy way. */
10311 uint32_t *pu32Dst;
10312 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10313 if (rc == VINF_SUCCESS)
10314 {
10315 *pu32Dst = u32Value;
10316 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10317 }
10318
10319 /* Commit the new RSP value unless an access handler made trouble. */
10320 if (rc == VINF_SUCCESS)
10321 pCtx->rsp = uNewRsp;
10322
10323 return rc;
10324}
10325
10326
10327/**
10328 * Pushes a dword segment register value onto the stack.
10329 *
10330 * @returns Strict VBox status code.
10331 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10332 * @param u32Value The value to push.
10333 */
10334IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10335{
10336 /* Decrement the stack pointer. */
10337 uint64_t uNewRsp;
10338 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10339 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10340
10341 VBOXSTRICTRC rc;
10342 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
10343 {
10344 /* The recompiler writes a full dword. */
10345 uint32_t *pu32Dst;
10346 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10347 if (rc == VINF_SUCCESS)
10348 {
10349 *pu32Dst = u32Value;
10350 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10351 }
10352 }
10353 else
10354 {
10355 /* The Intel docs talk about zero extending the selector register
10356 value. My actual Intel CPU here might be zero extending the value,
10357 but it still only writes the lower word... */
10358 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10359 * happens when crossing a page boundary: is the high word checked
10360 * for write accessibility or not? Probably it is. What about segment limits?
10361 * It appears this behavior is also shared with trap error codes.
10362 *
10363 * Docs indicate the behavior may have changed with the Pentium or Pentium Pro.
10364 * Check ancient hardware to see when it actually changed. */
10365 uint16_t *pu16Dst;
10366 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10367 if (rc == VINF_SUCCESS)
10368 {
10369 *pu16Dst = (uint16_t)u32Value;
10370 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10371 }
10372 }
10373
10374 /* Commit the new RSP value unless an access handler made trouble. */
10375 if (rc == VINF_SUCCESS)
10376 pCtx->rsp = uNewRsp;
10377
10378 return rc;
10379}
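
/*
 * Illustrative sketch (hypothetical address and values, not built): what the
 * selector push above leaves behind on the non-verifier path. Only the low word
 * of the 4-byte stack slot is rewritten, so stale bytes survive in the high
 * word. GCPtrNewTop stands in for the new stack top; status codes are omitted
 * for brevity.
 */
#if 0
    /* Assume the dword at GCPtrNewTop currently holds 0xdeadbeef. */
    VBOXSTRICTRC rcStrict = iemMemStackPushU32SReg(pVCpu, 0x0028 /* e.g. a selector value */);
    uint32_t u32Slot = 0;
    iemMemFetchDataU32(pVCpu, &u32Slot, X86_SREG_SS, GCPtrNewTop);
    /* u32Slot == 0xdead0028: the low word was written, the high word left untouched. */
#endif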
10380
10381
10382/**
10383 * Pushes a qword onto the stack.
10384 *
10385 * @returns Strict VBox status code.
10386 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10387 * @param u64Value The value to push.
10388 */
10389IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10390{
10391 /* Decrement the stack pointer. */
10392 uint64_t uNewRsp;
10393 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10394 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
10395
10396 /* Write the qword the lazy way. */
10397 uint64_t *pu64Dst;
10398 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10399 if (rc == VINF_SUCCESS)
10400 {
10401 *pu64Dst = u64Value;
10402 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10403 }
10404
10405 /* Commit the new RSP value unless an access handler made trouble. */
10406 if (rc == VINF_SUCCESS)
10407 pCtx->rsp = uNewRsp;
10408
10409 return rc;
10410}
10411
10412
10413/**
10414 * Pops a word from the stack.
10415 *
10416 * @returns Strict VBox status code.
10417 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10418 * @param pu16Value Where to store the popped value.
10419 */
10420IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10421{
10422 /* Increment the stack pointer. */
10423 uint64_t uNewRsp;
10424 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10425 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
10426
10427 /* Read the word the lazy way. */
10428 uint16_t const *pu16Src;
10429 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10430 if (rc == VINF_SUCCESS)
10431 {
10432 *pu16Value = *pu16Src;
10433 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10434
10435 /* Commit the new RSP value. */
10436 if (rc == VINF_SUCCESS)
10437 pCtx->rsp = uNewRsp;
10438 }
10439
10440 return rc;
10441}
10442
10443
10444/**
10445 * Pops a dword from the stack.
10446 *
10447 * @returns Strict VBox status code.
10448 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10449 * @param pu32Value Where to store the popped value.
10450 */
10451IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10452{
10453 /* Increment the stack pointer. */
10454 uint64_t uNewRsp;
10455 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10456 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
10457
10458 /* Read the dword the lazy way. */
10459 uint32_t const *pu32Src;
10460 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10461 if (rc == VINF_SUCCESS)
10462 {
10463 *pu32Value = *pu32Src;
10464 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10465
10466 /* Commit the new RSP value. */
10467 if (rc == VINF_SUCCESS)
10468 pCtx->rsp = uNewRsp;
10469 }
10470
10471 return rc;
10472}
10473
10474
10475/**
10476 * Pops a qword from the stack.
10477 *
10478 * @returns Strict VBox status code.
10479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10480 * @param pu64Value Where to store the popped value.
10481 */
10482IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10483{
10484 /* Increment the stack pointer. */
10485 uint64_t uNewRsp;
10486 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10487 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
10488
10489 /* Read the qword the lazy way. */
10490 uint64_t const *pu64Src;
10491 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10492 if (rc == VINF_SUCCESS)
10493 {
10494 *pu64Value = *pu64Src;
10495 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10496
10497 /* Commit the new RSP value. */
10498 if (rc == VINF_SUCCESS)
10499 pCtx->rsp = uNewRsp;
10500 }
10501
10502 return rc;
10503}
10504
10505
10506/**
10507 * Pushes a word onto the stack, using a temporary stack pointer.
10508 *
10509 * @returns Strict VBox status code.
10510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10511 * @param u16Value The value to push.
10512 * @param pTmpRsp Pointer to the temporary stack pointer.
10513 */
10514IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10515{
10516 /* Decrement the stack pointer. */
10517 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10518 RTUINT64U NewRsp = *pTmpRsp;
10519 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
10520
10521 /* Write the word the lazy way. */
10522 uint16_t *pu16Dst;
10523 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10524 if (rc == VINF_SUCCESS)
10525 {
10526 *pu16Dst = u16Value;
10527 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10528 }
10529
10530 /* Commit the new RSP value unless an access handler made trouble. */
10531 if (rc == VINF_SUCCESS)
10532 *pTmpRsp = NewRsp;
10533
10534 return rc;
10535}
10536
10537
10538/**
10539 * Pushes a dword onto the stack, using a temporary stack pointer.
10540 *
10541 * @returns Strict VBox status code.
10542 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10543 * @param u32Value The value to push.
10544 * @param pTmpRsp Pointer to the temporary stack pointer.
10545 */
10546IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10547{
10548 /* Decrement the stack pointer. */
10549 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10550 RTUINT64U NewRsp = *pTmpRsp;
10551 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
10552
10553 /* Write the dword the lazy way. */
10554 uint32_t *pu32Dst;
10555 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10556 if (rc == VINF_SUCCESS)
10557 {
10558 *pu32Dst = u32Value;
10559 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10560 }
10561
10562 /* Commit the new RSP value unless an access handler made trouble. */
10563 if (rc == VINF_SUCCESS)
10564 *pTmpRsp = NewRsp;
10565
10566 return rc;
10567}
10568
10569
10570/**
10571 * Pushes a qword onto the stack, using a temporary stack pointer.
10572 *
10573 * @returns Strict VBox status code.
10574 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10575 * @param u64Value The value to push.
10576 * @param pTmpRsp Pointer to the temporary stack pointer.
10577 */
10578IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10579{
10580 /* Decrement the stack pointer. */
10581 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10582 RTUINT64U NewRsp = *pTmpRsp;
10583 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
10584
10585 /* Write the qword the lazy way. */
10586 uint64_t *pu64Dst;
10587 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10588 if (rc == VINF_SUCCESS)
10589 {
10590 *pu64Dst = u64Value;
10591 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10592 }
10593
10594 /* Commit the new RSP value unless an access handler made trouble. */
10595 if (rc == VINF_SUCCESS)
10596 *pTmpRsp = NewRsp;
10597
10598 return rc;
10599}
10600
10601
10602/**
10603 * Pops a word from the stack, using a temporary stack pointer.
10604 *
10605 * @returns Strict VBox status code.
10606 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10607 * @param pu16Value Where to store the popped value.
10608 * @param pTmpRsp Pointer to the temporary stack pointer.
10609 */
10610IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10611{
10612 /* Increment the stack pointer. */
10613 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10614 RTUINT64U NewRsp = *pTmpRsp;
10615 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
10616
10617 /* Read the word the lazy way. */
10618 uint16_t const *pu16Src;
10619 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10620 if (rc == VINF_SUCCESS)
10621 {
10622 *pu16Value = *pu16Src;
10623 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10624
10625 /* Commit the new RSP value. */
10626 if (rc == VINF_SUCCESS)
10627 *pTmpRsp = NewRsp;
10628 }
10629
10630 return rc;
10631}
10632
10633
10634/**
10635 * Pops a dword from the stack, using a temporary stack pointer.
10636 *
10637 * @returns Strict VBox status code.
10638 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10639 * @param pu32Value Where to store the popped value.
10640 * @param pTmpRsp Pointer to the temporary stack pointer.
10641 */
10642IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10643{
10644 /* Increment the stack pointer. */
10645 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10646 RTUINT64U NewRsp = *pTmpRsp;
10647 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
10648
10649 /* Read the dword the lazy way. */
10650 uint32_t const *pu32Src;
10651 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10652 if (rc == VINF_SUCCESS)
10653 {
10654 *pu32Value = *pu32Src;
10655 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10656
10657 /* Commit the new RSP value. */
10658 if (rc == VINF_SUCCESS)
10659 *pTmpRsp = NewRsp;
10660 }
10661
10662 return rc;
10663}
10664
10665
10666/**
10667 * Pops a qword from the stack, using a temporary stack pointer.
10668 *
10669 * @returns Strict VBox status code.
10670 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10671 * @param pu64Value Where to store the popped value.
10672 * @param pTmpRsp Pointer to the temporary stack pointer.
10673 */
10674IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10675{
10676 /* Increment the stack pointer. */
10677 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10678 RTUINT64U NewRsp = *pTmpRsp;
10679 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10680
10681 /* Read the qword the lazy way. */
10682 uint64_t const *pu64Src;
10683 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10684 if (rcStrict == VINF_SUCCESS)
10685 {
10686 *pu64Value = *pu64Src;
10687 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10688
10689 /* Commit the new RSP value. */
10690 if (rcStrict == VINF_SUCCESS)
10691 *pTmpRsp = NewRsp;
10692 }
10693
10694 return rcStrict;
10695}
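
/*
 * Usage sketch (hypothetical values, not built): the *Ex push/pop variants work
 * on a caller-owned copy of RSP, so a multi-part operation can be abandoned
 * simply by not committing the copy.
 */
#if 0
    RTUINT64U    TmpRsp;
    TmpRsp.u = IEM_GET_CTX(pVCpu)->rsp;                 /* start from the architectural RSP */
    VBOXSTRICTRC rcStrict = iemMemStackPushU16Ex(pVCpu, UINT16_C(0x1234), &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPushU16Ex(pVCpu, UINT16_C(0x5678), &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        IEM_GET_CTX(pVCpu)->rsp = TmpRsp.u;             /* commit only if every step succeeded */
#endif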
10696
10697
10698/**
10699 * Begins a special stack push (used by interrupts, exceptions and such).
10700 *
10701 * This will raise \#SS or \#PF if appropriate.
10702 *
10703 * @returns Strict VBox status code.
10704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10705 * @param cbMem The number of bytes to push onto the stack.
10706 * @param ppvMem Where to return the pointer to the stack memory.
10707 * As with the other memory functions, this could be
10708 * direct access or bounce buffered access, so
10709 * don't commit any register changes until the
10710 * commit call succeeds.
10711 * @param puNewRsp Where to return the new RSP value. This must be
10712 * passed unchanged to
10713 * iemMemStackPushCommitSpecial().
10714 */
10715IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10716{
10717 Assert(cbMem < UINT8_MAX);
10718 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10719 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10720 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10721}
10722
10723
10724/**
10725 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10726 *
10727 * This will update the rSP.
10728 *
10729 * @returns Strict VBox status code.
10730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10731 * @param pvMem The pointer returned by
10732 * iemMemStackPushBeginSpecial().
10733 * @param uNewRsp The new RSP value returned by
10734 * iemMemStackPushBeginSpecial().
10735 */
10736IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10737{
10738 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10739 if (rcStrict == VINF_SUCCESS)
10740 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10741 return rcStrict;
10742}
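
/*
 * Usage sketch (hypothetical frame size, not built): the begin/commit pair for
 * exception-style pushes. The mapped block may be a bounce buffer, so nothing
 * becomes architecturally visible until the commit call succeeds.
 */
#if 0
    void        *pvFrame;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6 /* e.g. IP, CS, FLAGS */,
                                                        &pvFrame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* ... write the frame contents through pvFrame ... */
    rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvFrame, uNewRsp); /* updates RSP on success */
#endif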
10743
10744
10745/**
10746 * Begins a special stack pop (used by iret, retf and such).
10747 *
10748 * This will raise \#SS or \#PF if appropriate.
10749 *
10750 * @returns Strict VBox status code.
10751 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10752 * @param cbMem The number of bytes to pop from the stack.
10753 * @param ppvMem Where to return the pointer to the stack memory.
10754 * @param puNewRsp Where to return the new RSP value. This must be
10755 * assigned to CPUMCTX::rsp manually some time
10756 * after iemMemStackPopDoneSpecial() has been
10757 * called.
10758 */
10759IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10760{
10761 Assert(cbMem < UINT8_MAX);
10762 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10763 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10764 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10765}
10766
10767
10768/**
10769 * Continues a special stack pop (used by iret and retf).
10770 *
10771 * This will raise \#SS or \#PF if appropriate.
10772 *
10773 * @returns Strict VBox status code.
10774 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10775 * @param cbMem The number of bytes to pop from the stack.
10776 * @param ppvMem Where to return the pointer to the stack memory.
10777 * @param puNewRsp Where to return the new RSP value. This must be
10778 * assigned to CPUMCTX::rsp manually some time
10779 * after iemMemStackPopDoneSpecial() has been
10780 * called.
10781 */
10782IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10783{
10784 Assert(cbMem < UINT8_MAX);
10785 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10786 RTUINT64U NewRsp;
10787 NewRsp.u = *puNewRsp;
10788 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10789 *puNewRsp = NewRsp.u;
10790 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10791}
10792
10793
10794/**
10795 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10796 * iemMemStackPopContinueSpecial).
10797 *
10798 * The caller will manually commit the rSP.
10799 *
10800 * @returns Strict VBox status code.
10801 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10802 * @param pvMem The pointer returned by
10803 * iemMemStackPopBeginSpecial() or
10804 * iemMemStackPopContinueSpecial().
10805 */
10806IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10807{
10808 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10809}
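
/*
 * Usage sketch (hypothetical 32-bit frame, not built): the begin/done pair for
 * iret/retf-style pops. These helpers never touch RSP themselves; the caller
 * assigns *puNewRsp to CPUMCTX::rsp once all checks have passed, as documented
 * above.
 */
#if 0
    void const  *pvFrame;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, &pvFrame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    uint32_t const uNewEip    = ((uint32_t const *)pvFrame)[0];
    uint32_t const uNewCs     = ((uint32_t const *)pvFrame)[1];
    uint32_t const uNewEflags = ((uint32_t const *)pvFrame)[2];
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvFrame);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* ... validate uNewCs, uNewEip and uNewEflags, then commit manually: */
    IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
#endif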
10810
10811
10812/**
10813 * Fetches a system table byte.
10814 *
10815 * @returns Strict VBox status code.
10816 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10817 * @param pbDst Where to return the byte.
10818 * @param iSegReg The index of the segment register to use for
10819 * this access. The base and limits are checked.
10820 * @param GCPtrMem The address of the guest memory.
10821 */
10822IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10823{
10824 /* The lazy approach for now... */
10825 uint8_t const *pbSrc;
10826 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10827 if (rc == VINF_SUCCESS)
10828 {
10829 *pbDst = *pbSrc;
10830 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10831 }
10832 return rc;
10833}
10834
10835
10836/**
10837 * Fetches a system table word.
10838 *
10839 * @returns Strict VBox status code.
10840 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10841 * @param pu16Dst Where to return the word.
10842 * @param iSegReg The index of the segment register to use for
10843 * this access. The base and limits are checked.
10844 * @param GCPtrMem The address of the guest memory.
10845 */
10846IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10847{
10848 /* The lazy approach for now... */
10849 uint16_t const *pu16Src;
10850 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10851 if (rc == VINF_SUCCESS)
10852 {
10853 *pu16Dst = *pu16Src;
10854 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10855 }
10856 return rc;
10857}
10858
10859
10860/**
10861 * Fetches a system table dword.
10862 *
10863 * @returns Strict VBox status code.
10864 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10865 * @param pu32Dst Where to return the dword.
10866 * @param iSegReg The index of the segment register to use for
10867 * this access. The base and limits are checked.
10868 * @param GCPtrMem The address of the guest memory.
10869 */
10870IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10871{
10872 /* The lazy approach for now... */
10873 uint32_t const *pu32Src;
10874 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10875 if (rc == VINF_SUCCESS)
10876 {
10877 *pu32Dst = *pu32Src;
10878 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10879 }
10880 return rc;
10881}
10882
10883
10884/**
10885 * Fetches a system table qword.
10886 *
10887 * @returns Strict VBox status code.
10888 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10889 * @param pu64Dst Where to return the qword.
10890 * @param iSegReg The index of the segment register to use for
10891 * this access. The base and limits are checked.
10892 * @param GCPtrMem The address of the guest memory.
10893 */
10894IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10895{
10896 /* The lazy approach for now... */
10897 uint64_t const *pu64Src;
10898 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10899 if (rc == VINF_SUCCESS)
10900 {
10901 *pu64Dst = *pu64Src;
10902 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10903 }
10904 return rc;
10905}
10906
10907
10908/**
10909 * Fetches a descriptor table entry with caller specified error code.
10910 *
10911 * @returns Strict VBox status code.
10912 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10913 * @param pDesc Where to return the descriptor table entry.
10914 * @param uSel The selector which table entry to fetch.
10915 * @param uXcpt The exception to raise on table lookup error.
10916 * @param uErrorCode The error code associated with the exception.
10917 */
10918IEM_STATIC VBOXSTRICTRC
10919iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10920{
10921 AssertPtr(pDesc);
10922 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10923
10924 /** @todo did the 286 require all 8 bytes to be accessible? */
10925 /*
10926 * Get the selector table base and check bounds.
10927 */
10928 RTGCPTR GCPtrBase;
10929 if (uSel & X86_SEL_LDT)
10930 {
10931 if ( !pCtx->ldtr.Attr.n.u1Present
10932 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10933 {
10934 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10935 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10936 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10937 uErrorCode, 0);
10938 }
10939
10940 Assert(pCtx->ldtr.Attr.n.u1Present);
10941 GCPtrBase = pCtx->ldtr.u64Base;
10942 }
10943 else
10944 {
10945 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10946 {
10947 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
10948 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10949 uErrorCode, 0);
10950 }
10951 GCPtrBase = pCtx->gdtr.pGdt;
10952 }
10953
10954 /*
10955 * Read the legacy descriptor and maybe the long mode extensions if
10956 * required.
10957 */
10958 VBOXSTRICTRC rcStrict;
10959 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10960 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10961 else
10962 {
10963 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10964 if (rcStrict == VINF_SUCCESS)
10965 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10966 if (rcStrict == VINF_SUCCESS)
10967 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10968 if (rcStrict == VINF_SUCCESS)
10969 pDesc->Legacy.au16[3] = 0;
10970 else
10971 return rcStrict;
10972 }
10973
10974 if (rcStrict == VINF_SUCCESS)
10975 {
10976 if ( !IEM_IS_LONG_MODE(pVCpu)
10977 || pDesc->Legacy.Gen.u1DescType)
10978 pDesc->Long.au64[1] = 0;
10979 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
10980 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10981 else
10982 {
10983 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10984 /** @todo is this the right exception? */
10985 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10986 }
10987 }
10988 return rcStrict;
10989}
10990
10991
10992/**
10993 * Fetches a descriptor table entry.
10994 *
10995 * @returns Strict VBox status code.
10996 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10997 * @param pDesc Where to return the descriptor table entry.
10998 * @param uSel The selector which table entry to fetch.
10999 * @param uXcpt The exception to raise on table lookup error.
11000 */
11001IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11002{
11003 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11004}
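
/*
 * Illustrative sketch only (not part of the build): a caller typically fetches
 * the descriptor and then performs its own presence/type/DPL checks before
 * using it.  The exception number and the follow-up check below are merely
 * examples:
 *
 *      IEMSELDESC   Desc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      if (!Desc.Legacy.Gen.u1Present)
 *          ... raise the appropriate not-present exception here ...
 */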
11005
11006
11007/**
11008 * Fakes a long mode stack selector for SS = 0.
11009 *
11010 * @param pDescSs Where to return the fake stack descriptor.
11011 * @param uDpl The DPL we want.
11012 */
11013IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11014{
11015 pDescSs->Long.au64[0] = 0;
11016 pDescSs->Long.au64[1] = 0;
11017 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11018 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11019 pDescSs->Long.Gen.u2Dpl = uDpl;
11020 pDescSs->Long.Gen.u1Present = 1;
11021 pDescSs->Long.Gen.u1Long = 1;
11022}
11023
11024
11025/**
11026 * Marks the selector descriptor as accessed (only non-system descriptors).
11027 *
11028 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11029 * will therefore skip the limit checks.
11030 *
11031 * @returns Strict VBox status code.
11032 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11033 * @param uSel The selector.
11034 */
11035IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11036{
11037 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11038
11039 /*
11040 * Get the selector table base and calculate the entry address.
11041 */
11042 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11043 ? pCtx->ldtr.u64Base
11044 : pCtx->gdtr.pGdt;
11045 GCPtr += uSel & X86_SEL_MASK;
11046
11047 /*
11048 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11049 * ugly stuff to avoid this. This will make sure it's an atomic access
11050 * as well as more or less remove any question about 8-bit or 32-bit accesses.
11051 */
11052 VBOXSTRICTRC rcStrict;
11053 uint32_t volatile *pu32;
11054 if ((GCPtr & 3) == 0)
11055 {
11056        /* The normal case: map the 32 bits around the accessed bit (bit 40). */
11057 GCPtr += 2 + 2;
11058 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11059 if (rcStrict != VINF_SUCCESS)
11060 return rcStrict;
11061        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11062 }
11063 else
11064 {
11065 /* The misaligned GDT/LDT case, map the whole thing. */
11066 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11067 if (rcStrict != VINF_SUCCESS)
11068 return rcStrict;
11069 switch ((uintptr_t)pu32 & 3)
11070 {
11071 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11072 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11073 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11074 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11075 }
11076 }
11077
11078 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11079}
11080
11081/** @} */
11082
11083
11084/*
11085 * Include the C/C++ implementations of the instructions.
11086 */
11087#include "IEMAllCImpl.cpp.h"
11088
11089
11090
11091/** @name "Microcode" macros.
11092 *
11093 * The idea is that we should be able to use the same code to interpret
11094 * instructions as well as recompiler instructions. Thus this obfuscation.
11095 *
11096 * @{
11097 */
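
/*
 * Illustrative sketch only (not part of the build): an instruction body in the
 * opcode decoder code is written purely in terms of these IEM_MC_* macros.  A
 * register-to-register 16-bit move could look roughly like this (the register
 * indexes are hard-coded purely for the example):
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Tmp);
 *      IEM_MC_FETCH_GREG_U16(u16Tmp, X86_GREG_xCX);
 *      IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 *      return VINF_SUCCESS;
 */
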
11098#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11099#define IEM_MC_END() }
11100#define IEM_MC_PAUSE() do {} while (0)
11101#define IEM_MC_CONTINUE() do {} while (0)
11102
11103/** Internal macro. */
11104#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11105 do \
11106 { \
11107 VBOXSTRICTRC rcStrict2 = a_Expr; \
11108 if (rcStrict2 != VINF_SUCCESS) \
11109 return rcStrict2; \
11110 } while (0)
11111
11112
11113#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11114#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11115#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11116#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11117#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11118#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11119#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11120#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11121#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11122 do { \
11123 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11124 return iemRaiseDeviceNotAvailable(pVCpu); \
11125 } while (0)
11126#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11127 do { \
11128 if (((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11129 return iemRaiseDeviceNotAvailable(pVCpu); \
11130 } while (0)
11131#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11132 do { \
11133 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11134 return iemRaiseMathFault(pVCpu); \
11135 } while (0)
11136#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11137 do { \
11138 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11139 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11140 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11141 return iemRaiseUndefinedOpcode(pVCpu); \
11142 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11143 return iemRaiseDeviceNotAvailable(pVCpu); \
11144 } while (0)
11145#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11146 do { \
11147 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11148 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11149 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11150 return iemRaiseUndefinedOpcode(pVCpu); \
11151 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11152 return iemRaiseDeviceNotAvailable(pVCpu); \
11153 } while (0)
11154#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11155 do { \
11156 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11157 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11158 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11159 return iemRaiseUndefinedOpcode(pVCpu); \
11160 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11161 return iemRaiseDeviceNotAvailable(pVCpu); \
11162 } while (0)
11163#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11164 do { \
11165 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11166 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11167 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11168 return iemRaiseUndefinedOpcode(pVCpu); \
11169 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11170 return iemRaiseDeviceNotAvailable(pVCpu); \
11171 } while (0)
11172#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11173 do { \
11174 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11175 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11176 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11177 return iemRaiseUndefinedOpcode(pVCpu); \
11178 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11179 return iemRaiseDeviceNotAvailable(pVCpu); \
11180 } while (0)
11181#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11182 do { \
11183 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11184 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11185 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11186 return iemRaiseUndefinedOpcode(pVCpu); \
11187 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11188 return iemRaiseDeviceNotAvailable(pVCpu); \
11189 } while (0)
11190#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11191 do { \
11192 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11193 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11194 return iemRaiseUndefinedOpcode(pVCpu); \
11195 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11196 return iemRaiseDeviceNotAvailable(pVCpu); \
11197 } while (0)
11198#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11199 do { \
11200 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11201 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11202 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11203 return iemRaiseUndefinedOpcode(pVCpu); \
11204 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11205 return iemRaiseDeviceNotAvailable(pVCpu); \
11206 } while (0)
11207#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11208 do { \
11209 if (pVCpu->iem.s.uCpl != 0) \
11210 return iemRaiseGeneralProtectionFault0(pVCpu); \
11211 } while (0)
11212#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11213 do { \
11214 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11215 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11216 } while (0)
11217
11218
11219#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11220#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11221#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11222#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11223#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11224#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11225#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11226 uint32_t a_Name; \
11227 uint32_t *a_pName = &a_Name
11228#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11229 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
11230
11231#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11232#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11233
11234#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11235#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11236#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11237#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11238#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11239#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11240#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11241#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11242#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11243#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11244#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11245#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11246#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11247#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11248#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11249#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11250#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11251#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11252#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11253#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11254#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11255#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11256#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11257#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11258#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11259#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11260#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11261#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11262#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11263/** @note Not for IOPL or IF testing or modification. */
11264#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11265#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11266#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
11267#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
11268
11269#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11270#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11271#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11272#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11273#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11274#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11275#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11276#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11277#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11278#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11279#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11280 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11281
11282#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11283#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11284/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11285 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11286#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11287#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11288/** @note Not for IOPL or IF testing or modification. */
11289#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11290
11291#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11292#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11293#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11294 do { \
11295 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11296 *pu32Reg += (a_u32Value); \
11297        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11298 } while (0)
11299#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11300
11301#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11302#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11303#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11304 do { \
11305 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11306 *pu32Reg -= (a_u32Value); \
11307        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11308 } while (0)
11309#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11310#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11311
11312#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11313#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11314#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11315#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11316#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11317#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11318#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11319
11320#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11321#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11322#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11323#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11324
11325#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11326#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11327#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11328
11329#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11330#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11331#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11332
11333#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11334#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11335#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11336
11337#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11338#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11339#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11340
11341#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11342
11343#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11344
11345#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11346#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11347#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11348 do { \
11349 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11350 *pu32Reg &= (a_u32Value); \
11351        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11352 } while (0)
11353#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11354
11355#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11356#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11357#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11358 do { \
11359 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11360 *pu32Reg |= (a_u32Value); \
11361        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11362 } while (0)
11363#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11364
11365
11366/** @note Not for IOPL or IF modification. */
11367#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
11368/** @note Not for IOPL or IF modification. */
11369#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
11370/** @note Not for IOPL or IF modification. */
11371#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
11372
11373#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11374
11375/** Switches the FPU state to MMX mode (FSW.TOS=0, architectural FTW=0, i.e. all tags valid, which is 0xff in the abridged FTW stored here) if necessary. */
11376#define IEM_MC_FPU_TO_MMX_MODE() do { \
11377 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11378 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0xff; \
11379 } while (0)
11380
11381/** Switches the FPU state from MMX mode (architectural FTW=0xffff, i.e. abridged FTW=0, all tags empty). */
11382#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11383 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0; \
11384 } while (0)
11385
11386#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11387 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11388#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11389 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11390#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11391 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11392 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11393 } while (0)
11394#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11395 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11396 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11397 } while (0)
11398#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11399 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11400#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11401 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11402#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11403 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11404
11405#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11406 do { (a_u128Value).au64[0] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11407 (a_u128Value).au64[1] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11408 } while (0)
11409#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11410 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11411#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11412 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11413#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11414 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11415#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11416 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11417 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11418 } while (0)
11419#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11420 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11421#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11422 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11423 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11424 } while (0)
11425#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11426 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11427#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11428 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11429 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11430 } while (0)
11431#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11432 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11433#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11434 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11435#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11436 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11437#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11438 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11439#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11440 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11441 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11442 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11443 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11444 } while (0)
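
/*
 * Illustrative sketch only (not part of the build): a register-form SSE move
 * could combine the raise-check macros above with the XMM register macros
 * roughly like this (register indexes hard-coded for the example; a real body
 * would also prepare/actualize the SSE state before touching the registers):
 *
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
 *      IEM_MC_COPY_XREG_U128(1, 2);        (XMM1 <- XMM2, indexes chosen for the example)
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 *      return VINF_SUCCESS;
 */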
11445
11446#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11447 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11448 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11449 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11450 } while (0)
11451#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11452 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11453 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11454 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11455 } while (0)
11456#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11457 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11458 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11459 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11460 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11461 } while (0)
11462#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11463 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11464 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11465 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11466 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11467 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11468 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11469 } while (0)
11470
11471#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11472#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11473 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11474 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11475 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11476 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11477 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11478 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11479 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11480 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11481 } while (0)
11482#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11483 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11484 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11485 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11486 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11487 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11488 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11489 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11490 } while (0)
11491#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11492 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11493 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11494 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11495 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11496 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11497 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11498 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11499 } while (0)
11500#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11501 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11502 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11503 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11504 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11505 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11506 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11507 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11508 } while (0)
11509
11510#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11511 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11512#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11513 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11514#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11515 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11516#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11517 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11518 uintptr_t const iYRegTmp = (a_iYReg); \
11519 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11520 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11521 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11522 } while (0)
11523
11524#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11525 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11526 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11527 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11528 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11529 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11530 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11531 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11532 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11533 } while (0)
11534#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11535 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11536 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11537 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11538 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11539 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11540 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11541 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11542 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11543 } while (0)
11544#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11545 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11546 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11547 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11548 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11549 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11550 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11551 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11552 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11553 } while (0)
11554
11555#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11556 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11557 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11558 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11559 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11560 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11561 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11562 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11563 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11564 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11565 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11566 } while (0)
11567#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11568 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11569 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11570 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11571 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11572 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11573 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11574 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11575 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11576 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11577 } while (0)
11578#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11579 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11580 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11581 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11582 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11583 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11584 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11585 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11586 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11587 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11588 } while (0)
11589#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11590 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11591 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11592 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11593 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11594 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11595 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11596 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11597 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11598 } while (0)
11599
11600#ifndef IEM_WITH_SETJMP
11601# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11602 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11603# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11604 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11605# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11606 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11607#else
11608# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11609 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11610# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11611 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11612# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11613 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11614#endif
11615
11616#ifndef IEM_WITH_SETJMP
11617# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11618 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11619# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11620 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11621# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11622 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11623#else
11624# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11625 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11626# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11627 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11628# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11629 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11630#endif
11631
11632#ifndef IEM_WITH_SETJMP
11633# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11634 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11635# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11636 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11637# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11638 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11639#else
11640# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11641 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11642# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11643 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11644# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11645 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11646#endif
11647
11648#ifdef SOME_UNUSED_FUNCTION
11649# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11650 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11651#endif
11652
11653#ifndef IEM_WITH_SETJMP
11654# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11655 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11656# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11657 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11658# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11659 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11660# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11661 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11662#else
11663# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11664 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11665# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11666 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11667# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11668 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11669# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11670 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11671#endif
11672
11673#ifndef IEM_WITH_SETJMP
11674# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11675 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11676# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11677 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11678# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11679 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11680#else
11681# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11682 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11683# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11684 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11685# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11686 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11687#endif
11688
11689#ifndef IEM_WITH_SETJMP
11690# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11691 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11692# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11693 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11694#else
11695# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11696 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11697# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11698 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11699#endif
11700
11701#ifndef IEM_WITH_SETJMP
11702# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11703 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11704# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11705 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11706#else
11707# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11708 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11709# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11710 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11711#endif
11712
11713
11714
11715#ifndef IEM_WITH_SETJMP
11716# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11717 do { \
11718 uint8_t u8Tmp; \
11719 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11720 (a_u16Dst) = u8Tmp; \
11721 } while (0)
11722# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11723 do { \
11724 uint8_t u8Tmp; \
11725 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11726 (a_u32Dst) = u8Tmp; \
11727 } while (0)
11728# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11729 do { \
11730 uint8_t u8Tmp; \
11731 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11732 (a_u64Dst) = u8Tmp; \
11733 } while (0)
11734# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11735 do { \
11736 uint16_t u16Tmp; \
11737 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11738 (a_u32Dst) = u16Tmp; \
11739 } while (0)
11740# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11741 do { \
11742 uint16_t u16Tmp; \
11743 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11744 (a_u64Dst) = u16Tmp; \
11745 } while (0)
11746# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11747 do { \
11748 uint32_t u32Tmp; \
11749 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11750 (a_u64Dst) = u32Tmp; \
11751 } while (0)
11752#else /* IEM_WITH_SETJMP */
11753# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11754 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11755# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11756 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11757# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11758 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11759# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11760 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11761# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11762 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11763# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11764 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11765#endif /* IEM_WITH_SETJMP */
11766
11767#ifndef IEM_WITH_SETJMP
11768# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11769 do { \
11770 uint8_t u8Tmp; \
11771 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11772 (a_u16Dst) = (int8_t)u8Tmp; \
11773 } while (0)
11774# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11775 do { \
11776 uint8_t u8Tmp; \
11777 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11778 (a_u32Dst) = (int8_t)u8Tmp; \
11779 } while (0)
11780# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11781 do { \
11782 uint8_t u8Tmp; \
11783 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11784 (a_u64Dst) = (int8_t)u8Tmp; \
11785 } while (0)
11786# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11787 do { \
11788 uint16_t u16Tmp; \
11789 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11790 (a_u32Dst) = (int16_t)u16Tmp; \
11791 } while (0)
11792# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11793 do { \
11794 uint16_t u16Tmp; \
11795 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11796 (a_u64Dst) = (int16_t)u16Tmp; \
11797 } while (0)
11798# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11799 do { \
11800 uint32_t u32Tmp; \
11801 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11802 (a_u64Dst) = (int32_t)u32Tmp; \
11803 } while (0)
11804#else /* IEM_WITH_SETJMP */
11805# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11806 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11807# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11808 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11809# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11810 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11811# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11812 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11813# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11814 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11815# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11816 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11817#endif /* IEM_WITH_SETJMP */
11818
11819#ifndef IEM_WITH_SETJMP
11820# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11821 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11822# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11823 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11824# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11825 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11826# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11827 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11828#else
11829# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11830 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11831# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11832 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11833# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11834 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11835# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11836 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11837#endif
11838
11839#ifndef IEM_WITH_SETJMP
11840# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11841 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11842# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11843 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11844# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11845 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11846# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11847 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11848#else
11849# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11850 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11851# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11852 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11853# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11854 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11855# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11856 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11857#endif
11858
11859#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11860#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11861#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11862#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11863#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11864#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11865#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11866 do { \
11867 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11868 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11869 } while (0)
11870
11871#ifndef IEM_WITH_SETJMP
11872# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11873 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11874# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11875 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11876#else
11877# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11878 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11879# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11880 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11881#endif
11882
11883#ifndef IEM_WITH_SETJMP
11884# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11885 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11886# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11887 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11888#else
11889# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11890 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11891# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11892 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11893#endif
11894
11895
11896#define IEM_MC_PUSH_U16(a_u16Value) \
11897 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11898#define IEM_MC_PUSH_U32(a_u32Value) \
11899 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11900#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11901 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11902#define IEM_MC_PUSH_U64(a_u64Value) \
11903 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11904
11905#define IEM_MC_POP_U16(a_pu16Value) \
11906 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11907#define IEM_MC_POP_U32(a_pu32Value) \
11908 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11909#define IEM_MC_POP_U64(a_pu64Value) \
11910 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11911
11912/** Maps guest memory for direct or bounce buffered access.
11913 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11914 * @remarks May return.
11915 */
11916#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11917 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11918
11919/** Maps guest memory for direct or bounce buffered access.
11920 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11921 * @remarks May return.
11922 */
11923#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11924 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11925
11926/** Commits the memory and unmaps the guest memory.
11927 * @remarks May return.
11928 */
11929#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11930 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
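
/*
 * Illustrative sketch only (not part of the build): the usual read-modify-write
 * pattern pairs IEM_MC_MEM_MAP with IEM_MC_MEM_COMMIT_AND_UNMAP around a worker
 * call.  The worker name and the GCPtrEffDst local are made up for the example:
 *
 *      IEM_MC_ARG(uint16_t *, pu16Dst, 0);
 *      IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_CALL_VOID_AIMPL_1(iemAImpl_hypotheticalOp_u16, pu16Dst);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 */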
11931
11932/** Commits the memory and unmaps the guest memory unless the FPU status word
11933 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
11934 * that would cause the store not to be performed.
11935 *
11936 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11937 * store, while \#P will not.
11938 *
11939 * @remarks May in theory return - for now.
11940 */
11941#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11942 do { \
11943 if ( !(a_u16FSW & X86_FSW_ES) \
11944 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11945 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11946 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11947 } while (0)
11948
11949/** Calculate efficient address from R/M. */
11950#ifndef IEM_WITH_SETJMP
11951# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11952 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11953#else
11954# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11955 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11956#endif
11957
11958#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11959#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11960#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11961#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11962#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11963#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11964#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11965
11966/**
11967 * Defers the rest of the instruction emulation to a C implementation routine
11968 * and returns, only taking the standard parameters.
11969 *
11970 * @param a_pfnCImpl The pointer to the C routine.
11971 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11972 */
11973#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11974
11975/**
11976 * Defers the rest of instruction emulation to a C implementation routine and
11977 * returns, taking one argument in addition to the standard ones.
11978 *
11979 * @param a_pfnCImpl The pointer to the C routine.
11980 * @param a0 The argument.
11981 */
11982#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11983
11984/**
11985 * Defers the rest of the instruction emulation to a C implementation routine
11986 * and returns, taking two arguments in addition to the standard ones.
11987 *
11988 * @param a_pfnCImpl The pointer to the C routine.
11989 * @param a0 The first extra argument.
11990 * @param a1 The second extra argument.
11991 */
11992#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11993
11994/**
11995 * Defers the rest of the instruction emulation to a C implementation routine
11996 * and returns, taking three arguments in addition to the standard ones.
11997 *
11998 * @param a_pfnCImpl The pointer to the C routine.
11999 * @param a0 The first extra argument.
12000 * @param a1 The second extra argument.
12001 * @param a2 The third extra argument.
12002 */
12003#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12004
12005/**
12006 * Defers the rest of the instruction emulation to a C implementation routine
12007 * and returns, taking four arguments in addition to the standard ones.
12008 *
12009 * @param a_pfnCImpl The pointer to the C routine.
12010 * @param a0 The first extra argument.
12011 * @param a1 The second extra argument.
12012 * @param a2 The third extra argument.
12013 * @param a3 The fourth extra argument.
12014 */
12015#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12016
12017/**
12018 * Defers the rest of the instruction emulation to a C implementation routine
12019 * and returns, taking five arguments in addition to the standard ones.
12020 *
12021 * @param a_pfnCImpl The pointer to the C routine.
12022 * @param a0 The first extra argument.
12023 * @param a1 The second extra argument.
12024 * @param a2 The third extra argument.
12025 * @param a3 The fourth extra argument.
12026 * @param a4 The fifth extra argument.
12027 */
12028#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
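
/* Illustrative usage sketch (comment only): a decoder function typically gathers the
 * operands into IEM_MC_ARG variables and then hands the rest of the work to a C
 * worker.  Note that IEM_MC_CALL_CIMPL_2 contains a 'return', so it ends the decoder
 * function; IEM_MC_BEGIN/IEM_MC_ARG are assumed from earlier in this file and the
 * worker name below is purely hypothetical.
 *
 *      IEM_MC_BEGIN(2, 0);
 *      IEM_MC_ARG(uint8_t,  iSegReg, 0);
 *      IEM_MC_ARG(uint16_t, uSel,    1);
 *      ... fetch iSegReg and uSel ...
 *      IEM_MC_CALL_CIMPL_2(iemCImpl_LoadSRegHypothetical, iSegReg, uSel);
 *      IEM_MC_END();
 */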
12029
12030/**
12031 * Defers the entire instruction emulation to a C implementation routine and
12032 * returns, only taking the standard parameters.
12033 *
12034 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12035 *
12036 * @param a_pfnCImpl The pointer to the C routine.
12037 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12038 */
12039#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12040
12041/**
12042 * Defers the entire instruction emulation to a C implementation routine and
12043 * returns, taking one argument in addition to the standard ones.
12044 *
12045 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12046 *
12047 * @param a_pfnCImpl The pointer to the C routine.
12048 * @param a0 The argument.
12049 */
12050#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12051
12052/**
12053 * Defers the entire instruction emulation to a C implementation routine and
12054 * returns, taking two arguments in addition to the standard ones.
12055 *
12056 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12057 *
12058 * @param a_pfnCImpl The pointer to the C routine.
12059 * @param a0 The first extra argument.
12060 * @param a1 The second extra argument.
12061 */
12062#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12063
12064/**
12065 * Defers the entire instruction emulation to a C implementation routine and
12066 * returns, taking three arguments in addition to the standard ones.
12067 *
12068 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12069 *
12070 * @param a_pfnCImpl The pointer to the C routine.
12071 * @param a0 The first extra argument.
12072 * @param a1 The second extra argument.
12073 * @param a2 The third extra argument.
12074 */
12075#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
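
/* Illustrative usage sketch (comment only): when the whole instruction lives in a C
 * worker there is no IEM_MC_BEGIN/IEM_MC_END block at all; the decoder simply does
 * (worker name hypothetical):
 *
 *      return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_SomethingComplicated);
 */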
12076
12077/**
12078 * Calls a FPU assembly implementation taking one visible argument.
12079 *
12080 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12081 * @param a0 The first extra argument.
12082 */
12083#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12084 do { \
12085 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
12086 } while (0)
12087
12088/**
12089 * Calls a FPU assembly implementation taking two visible arguments.
12090 *
12091 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12092 * @param a0 The first extra argument.
12093 * @param a1 The second extra argument.
12094 */
12095#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12096 do { \
12097 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12098 } while (0)
12099
12100/**
12101 * Calls a FPU assembly implementation taking three visible arguments.
12102 *
12103 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12104 * @param a0 The first extra argument.
12105 * @param a1 The second extra argument.
12106 * @param a2 The third extra argument.
12107 */
12108#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12109 do { \
12110 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12111 } while (0)
12112
12113#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12114 do { \
12115 (a_FpuData).FSW = (a_FSW); \
12116 (a_FpuData).r80Result = *(a_pr80Value); \
12117 } while (0)
12118
12119/** Pushes FPU result onto the stack. */
12120#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12121 iemFpuPushResult(pVCpu, &a_FpuData)
12122/** Pushes FPU result onto the stack and sets the FPUDP. */
12123#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12124 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12125
12126/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
12127#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12128 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12129
12130/** Stores FPU result in a stack register. */
12131#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12132 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12133/** Stores FPU result in a stack register and pops the stack. */
12134#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12135 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12136/** Stores FPU result in a stack register and sets the FPUDP. */
12137#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12138 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12139/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12140 * stack. */
12141#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12142 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
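
/* Illustrative usage sketch (comment only): a typical two-operand FPU register
 * instruction combines the AIMPL call macros and result helpers above with the
 * FPU-register IF macros further down.  The IEMFPURESULT type, IEM_MC_ARG_LOCAL_REF
 * and the other IEM_MC_* helpers are assumed from earlier in this file; a_pfnAImpl
 * stands for the assembly worker.
 *
 *      IEM_MC_BEGIN(3, 1);
 *      IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *      IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,      1);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,      2);
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, 1)
 *          IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0);
 *      IEM_MC_ENDIF();
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */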
12143
12144/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12145#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12146 iemFpuUpdateOpcodeAndIp(pVCpu)
12147/** Free a stack register (for FFREE and FFREEP). */
12148#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12149 iemFpuStackFree(pVCpu, a_iStReg)
12150/** Increment the FPU stack pointer. */
12151#define IEM_MC_FPU_STACK_INC_TOP() \
12152 iemFpuStackIncTop(pVCpu)
12153/** Decrement the FPU stack pointer. */
12154#define IEM_MC_FPU_STACK_DEC_TOP() \
12155 iemFpuStackDecTop(pVCpu)
12156
12157/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12158#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12159 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12160/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12161#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12162 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12163/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12164#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12165 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12166/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12167#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12168 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12169/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12170 * stack. */
12171#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12172 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12173/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12174#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12175 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12176
12177/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12178#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12179 iemFpuStackUnderflow(pVCpu, a_iStDst)
12180/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12181 * stack. */
12182#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12183 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12184/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12185 * FPUDS. */
12186#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12187 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12188/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12189 * FPUDS. Pops stack. */
12190#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12191 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12192/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12193 * stack twice. */
12194#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12195 iemFpuStackUnderflowThenPopPop(pVCpu)
12196/** Raises a FPU stack underflow exception for an instruction pushing a result
12197 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12198#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12199 iemFpuStackPushUnderflow(pVCpu)
12200/** Raises a FPU stack underflow exception for an instruction pushing a result
12201 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12202#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12203 iemFpuStackPushUnderflowTwo(pVCpu)
12204
12205/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12206 * FPUIP, FPUCS and FOP. */
12207#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12208 iemFpuStackPushOverflow(pVCpu)
12209/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12210 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12211#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12212 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12213/** Prepares for using the FPU state.
12214 * Ensures that we can use the host FPU in the current context (RC+R0).
12215 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12216#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12217/** Actualizes the guest FPU state so it can be accessed read-only fashion. */
12218#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12219/** Actualizes the guest FPU state so it can be accessed and modified. */
12220#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12221
12222/** Prepares for using the SSE state.
12223 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12224 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12225#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12226/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12227#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12228/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12229#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12230
12231/** Prepares for using the AVX state.
12232 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12233 * Ensures the guest AVX state in the CPUMCTX is up to date.
12234 * @note This will include the AVX512 state too when support for it is added
12235 * due to the zero-extending feature of VEX instructions. */
12236#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12237/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12238#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12239/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12240#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12241
12242/**
12243 * Calls an MMX assembly implementation taking two visible arguments.
12244 *
12245 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12246 * @param a0 The first extra argument.
12247 * @param a1 The second extra argument.
12248 */
12249#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12250 do { \
12251 IEM_MC_PREPARE_FPU_USAGE(); \
12252 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12253 } while (0)
12254
12255/**
12256 * Calls an MMX assembly implementation taking three visible arguments.
12257 *
12258 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12259 * @param a0 The first extra argument.
12260 * @param a1 The second extra argument.
12261 * @param a2 The third extra argument.
12262 */
12263#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12264 do { \
12265 IEM_MC_PREPARE_FPU_USAGE(); \
12266 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12267 } while (0)
12268
12269
12270/**
12271 * Calls an SSE assembly implementation taking two visible arguments.
12272 *
12273 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12274 * @param a0 The first extra argument.
12275 * @param a1 The second extra argument.
12276 */
12277#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12278 do { \
12279 IEM_MC_PREPARE_SSE_USAGE(); \
12280 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12281 } while (0)
12282
12283/**
12284 * Calls an SSE assembly implementation taking three visible arguments.
12285 *
12286 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12287 * @param a0 The first extra argument.
12288 * @param a1 The second extra argument.
12289 * @param a2 The third extra argument.
12290 */
12291#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12292 do { \
12293 IEM_MC_PREPARE_SSE_USAGE(); \
12294 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12295 } while (0)
12296
12297
12298/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12299 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12300#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12301 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState), 0)
12302
12303/**
12304 * Calls an AVX assembly implementation taking two visible arguments.
12305 *
12306 * There is one implicit zero'th argument, a pointer to the extended state.
12307 *
12308 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12309 * @param a1 The first extra argument.
12310 * @param a2 The second extra argument.
12311 */
12312#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12313 do { \
12314 IEM_MC_PREPARE_AVX_USAGE(); \
12315 a_pfnAImpl(pXState, (a1), (a2)); \
12316 } while (0)
12317
12318/**
12319 * Calls an AVX assembly implementation taking three visible arguments.
12320 *
12321 * There is one implicit zero'th argument, a pointer to the extended state.
12322 *
12323 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12324 * @param a1 The first extra argument.
12325 * @param a2 The second extra argument.
12326 * @param a3 The third extra argument.
12327 */
12328#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12329 do { \
12330 IEM_MC_PREPARE_AVX_USAGE(); \
12331 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12332 } while (0)
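
/* Illustrative usage sketch (comment only; details assumed rather than taken from real
 * decoder code): the implicit pXState argument is declared first, then the visible
 * arguments follow at indexes 1 and up.  The visible argument types here are purely
 * illustrative and a_pfnAImpl stands for the assembly worker.
 *
 *      IEM_MC_BEGIN(3, 0);
 *      IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *      IEM_MC_ARG(uint8_t, iYRegDst, 1);
 *      IEM_MC_ARG(uint8_t, iYRegSrc, 2);
 *      ... fetch the register indexes ...
 *      IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, iYRegDst, iYRegSrc);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */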
12333
12334/** @note Not for IOPL or IF testing. */
12335#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
12336/** @note Not for IOPL or IF testing. */
12337#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
12338/** @note Not for IOPL or IF testing. */
12339#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
12340/** @note Not for IOPL or IF testing. */
12341#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
12342/** @note Not for IOPL or IF testing. */
12343#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12344 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12345 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12346/** @note Not for IOPL or IF testing. */
12347#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12348 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12349 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12350/** @note Not for IOPL or IF testing. */
12351#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12352 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12353 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12354 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12355/** @note Not for IOPL or IF testing. */
12356#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12357 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12358 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12359 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12360#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
12361#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
12362#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
12363/** @note Not for IOPL or IF testing. */
12364#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12365 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12366 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12367/** @note Not for IOPL or IF testing. */
12368#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12369 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12370 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12371/** @note Not for IOPL or IF testing. */
12372#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12373 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12374 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12375/** @note Not for IOPL or IF testing. */
12376#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12377 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12378 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12379/** @note Not for IOPL or IF testing. */
12380#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12381 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12382 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12383/** @note Not for IOPL or IF testing. */
12384#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12385 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12386 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12387#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12388#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12389
12390#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12391 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12392#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12393 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12394#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12395 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12396#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12397 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12398#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12399 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12400#define IEM_MC_IF_FCW_IM() \
12401 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12402
12403#define IEM_MC_ELSE() } else {
12404#define IEM_MC_ENDIF() } do {} while (0)
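
/* Illustrative usage sketch (comment only): the IEM_MC_IF_* / IEM_MC_ELSE /
 * IEM_MC_ENDIF macros expand to plain C if/else blocks.  For a JZ-style instruction
 * it could look like this, assuming i8Imm was fetched earlier, X86_EFL_ZF from x86.h,
 * and IEM_MC_REL_JMP_S8 / IEM_MC_ADVANCE_RIP from earlier in this file:
 *
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *          IEM_MC_REL_JMP_S8(i8Imm);
 *      IEM_MC_ELSE()
 *          IEM_MC_ADVANCE_RIP();
 *      IEM_MC_ENDIF();
 */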
12405
12406/** @} */
12407
12408
12409/** @name Opcode Debug Helpers.
12410 * @{
12411 */
12412#ifdef VBOX_WITH_STATISTICS
12413# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12414#else
12415# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12416#endif
12417
12418#ifdef DEBUG
12419# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12420 do { \
12421 IEMOP_INC_STATS(a_Stats); \
12422 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
12423 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12424 } while (0)
12425
12426# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12427 do { \
12428 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12429 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12430 (void)RT_CONCAT(OP_,a_Upper); \
12431 (void)(a_fDisHints); \
12432 (void)(a_fIemHints); \
12433 } while (0)
12434
12435# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12436 do { \
12437 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12438 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12439 (void)RT_CONCAT(OP_,a_Upper); \
12440 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12441 (void)(a_fDisHints); \
12442 (void)(a_fIemHints); \
12443 } while (0)
12444
12445# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12446 do { \
12447 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12448 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12449 (void)RT_CONCAT(OP_,a_Upper); \
12450 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12451 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12452 (void)(a_fDisHints); \
12453 (void)(a_fIemHints); \
12454 } while (0)
12455
12456# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12457 do { \
12458 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12459 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12460 (void)RT_CONCAT(OP_,a_Upper); \
12461 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12462 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12463 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12464 (void)(a_fDisHints); \
12465 (void)(a_fIemHints); \
12466 } while (0)
12467
12468# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12469 do { \
12470 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12471 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12472 (void)RT_CONCAT(OP_,a_Upper); \
12473 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12474 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12475 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12476 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12477 (void)(a_fDisHints); \
12478 (void)(a_fIemHints); \
12479 } while (0)
12480
12481#else
12482# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12483
12484# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12485 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12486# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12487 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12488# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12489 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12490# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12491 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12492# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12493 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12494
12495#endif
12496
12497#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12498 IEMOP_MNEMONIC0EX(a_Lower, \
12499 #a_Lower, \
12500 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12501#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12502 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12503 #a_Lower " " #a_Op1, \
12504 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12505#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12506 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12507 #a_Lower " " #a_Op1 "," #a_Op2, \
12508 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12509#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12510 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12511 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12512 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12513#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12514 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12515 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12516 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
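
/* Illustrative usage sketch (comment only): a decoder function typically starts by
 * tagging the instruction with one of the IEMOP_MNEMONIC macros above.  The form,
 * operand and hint values below are illustrative and must correspond to existing
 * IEMOPFORM_, OP_, OP_PARM_ and DISOPTYPE_ definitions:
 *
 *      FNIEMOP_DEF(iemOp_add_Eb_Gb)
 *      {
 *          IEMOP_MNEMONIC2(MR, ADD, add, Eb, Gb, DISOPTYPE_HARMLESS, 0);
 *          ...
 *      }
 */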
12517
12518/** @} */
12519
12520
12521/** @name Opcode Helpers.
12522 * @{
12523 */
12524
12525#ifdef IN_RING3
12526# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12527 do { \
12528 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12529 else \
12530 { \
12531 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12532 return IEMOP_RAISE_INVALID_OPCODE(); \
12533 } \
12534 } while (0)
12535#else
12536# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12537 do { \
12538 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12539 else return IEMOP_RAISE_INVALID_OPCODE(); \
12540 } while (0)
12541#endif
12542
12543/** The instruction requires a 186 or later. */
12544#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12545# define IEMOP_HLP_MIN_186() do { } while (0)
12546#else
12547# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12548#endif
12549
12550/** The instruction requires a 286 or later. */
12551#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12552# define IEMOP_HLP_MIN_286() do { } while (0)
12553#else
12554# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12555#endif
12556
12557/** The instruction requires a 386 or later. */
12558#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12559# define IEMOP_HLP_MIN_386() do { } while (0)
12560#else
12561# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12562#endif
12563
12564/** The instruction requires a 386 or later if the given expression is true. */
12565#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12566# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12567#else
12568# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12569#endif
12570
12571/** The instruction requires a 486 or later. */
12572#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12573# define IEMOP_HLP_MIN_486() do { } while (0)
12574#else
12575# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12576#endif
12577
12578/** The instruction requires a Pentium (586) or later. */
12579#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12580# define IEMOP_HLP_MIN_586() do { } while (0)
12581#else
12582# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12583#endif
12584
12585/** The instruction requires a PentiumPro (686) or later. */
12586#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12587# define IEMOP_HLP_MIN_686() do { } while (0)
12588#else
12589# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12590#endif
12591
12592
12593/** The instruction raises an \#UD in real and V8086 mode. */
12594#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12595 do \
12596 { \
12597 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12598 else return IEMOP_RAISE_INVALID_OPCODE(); \
12599 } while (0)
12600
12601/** The instruction is not available in 64-bit mode; throws \#UD if we're in
12602 * 64-bit mode. */
12603#define IEMOP_HLP_NO_64BIT() \
12604 do \
12605 { \
12606 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12607 return IEMOP_RAISE_INVALID_OPCODE(); \
12608 } while (0)
12609
12610/** The instruction is only available in 64-bit mode; throws \#UD if we're not in
12611 * 64-bit mode. */
12612#define IEMOP_HLP_ONLY_64BIT() \
12613 do \
12614 { \
12615 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12616 return IEMOP_RAISE_INVALID_OPCODE(); \
12617 } while (0)
12618
12619/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12620#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12621 do \
12622 { \
12623 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12624 iemRecalEffOpSize64Default(pVCpu); \
12625 } while (0)
12626
12627/** The instruction has 64-bit operand size if 64-bit mode. */
12628#define IEMOP_HLP_64BIT_OP_SIZE() \
12629 do \
12630 { \
12631 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12632 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12633 } while (0)
12634
12635/** Only a REX prefix immediately preceding the first opcode byte takes
12636 * effect. This macro helps to ensure this, as well as logging bad guest code. */
12637#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12638 do \
12639 { \
12640 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12641 { \
12642 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
12643 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
12644 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12645 pVCpu->iem.s.uRexB = 0; \
12646 pVCpu->iem.s.uRexIndex = 0; \
12647 pVCpu->iem.s.uRexReg = 0; \
12648 iemRecalEffOpSize(pVCpu); \
12649 } \
12650 } while (0)
12651
12652/**
12653 * Done decoding.
12654 */
12655#define IEMOP_HLP_DONE_DECODING() \
12656 do \
12657 { \
12658 /*nothing for now, maybe later... */ \
12659 } while (0)
12660
12661/**
12662 * Done decoding, raise \#UD exception if lock prefix present.
12663 */
12664#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12665 do \
12666 { \
12667 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12668 { /* likely */ } \
12669 else \
12670 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12671 } while (0)
12672
12673
12674/**
12675 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12676 * repnz or size prefixes are present, or if in real or v8086 mode.
12677 */
12678#define IEMOP_HLP_DONE_VEX_DECODING() \
12679 do \
12680 { \
12681 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12682 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12683 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12684 { /* likely */ } \
12685 else \
12686 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12687 } while (0)
12688
12689/**
12690 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12691 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not 0.
12692 */
12693#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12694 do \
12695 { \
12696 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12697 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12698 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12699 && pVCpu->iem.s.uVexLength == 0)) \
12700 { /* likely */ } \
12701 else \
12702 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12703 } while (0)
12704
12705
12706/**
12707 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12708 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12709 * register 0, or if in real or v8086 mode.
12710 */
12711#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12712 do \
12713 { \
12714 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12715 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12716 && !pVCpu->iem.s.uVex3rdReg \
12717 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12718 { /* likely */ } \
12719 else \
12720 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12721 } while (0)
12722
12723/**
12724 * Done decoding VEX, no V, L=0.
12725 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12726 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12727 */
12728#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12729 do \
12730 { \
12731 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12732 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12733 && pVCpu->iem.s.uVexLength == 0 \
12734 && pVCpu->iem.s.uVex3rdReg == 0 \
12735 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12736 { /* likely */ } \
12737 else \
12738 return IEMOP_RAISE_INVALID_OPCODE(); \
12739 } while (0)
12740
12741#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12742 do \
12743 { \
12744 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12745 { /* likely */ } \
12746 else \
12747 { \
12748 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12749 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12750 } \
12751 } while (0)
12752#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12753 do \
12754 { \
12755 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12756 { /* likely */ } \
12757 else \
12758 { \
12759 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12760 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12761 } \
12762 } while (0)
12763
12764/**
12765 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12766 * are present.
12767 */
12768#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12769 do \
12770 { \
12771 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12772 { /* likely */ } \
12773 else \
12774 return IEMOP_RAISE_INVALID_OPCODE(); \
12775 } while (0)
12776
12777
12778#ifdef VBOX_WITH_NESTED_HWVIRT
12779/** Checks and handles an SVM nested-guest control & instruction intercept. */
12780# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
12781 do \
12782 { \
12783 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
12784 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
12785 } while (0)
12786
12787/** Checks and handles an SVM nested-guest CRx read intercept. */
12788# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \
12789 do \
12790 { \
12791 if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)) \
12792 IEM_RETURN_SVM_VMEXIT(a_pVCpu, SVM_EXIT_READ_CR0 + (a_uCr), a_uExitInfo1, a_uExitInfo2); \
12793 } while (0)
12794
12795#else /* !VBOX_WITH_NESTED_HWVIRT */
12796# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12797# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12798#endif /* !VBOX_WITH_NESTED_HWVIRT */
12799
12800
12801/**
12802 * Calculates the effective address of a ModR/M memory operand.
12803 *
12804 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12805 *
12806 * @return Strict VBox status code.
12807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12808 * @param bRm The ModRM byte.
12809 * @param cbImm The size of any immediate following the
12810 * effective address opcode bytes. Important for
12811 * RIP relative addressing.
12812 * @param pGCPtrEff Where to return the effective address.
12813 */
12814IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12815{
12816 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12817 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12818# define SET_SS_DEF() \
12819 do \
12820 { \
12821 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12822 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12823 } while (0)
12824
12825 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12826 {
12827/** @todo Check the effective address size crap! */
12828 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12829 {
12830 uint16_t u16EffAddr;
12831
12832 /* Handle the disp16 form with no registers first. */
12833 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12834 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12835 else
12836 {
12837                /* Get the displacement. */
12838 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12839 {
12840 case 0: u16EffAddr = 0; break;
12841 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12842 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12843 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12844 }
12845
12846 /* Add the base and index registers to the disp. */
12847 switch (bRm & X86_MODRM_RM_MASK)
12848 {
12849 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12850 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12851 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12852 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12853 case 4: u16EffAddr += pCtx->si; break;
12854 case 5: u16EffAddr += pCtx->di; break;
12855 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12856 case 7: u16EffAddr += pCtx->bx; break;
12857 }
12858 }
12859
12860 *pGCPtrEff = u16EffAddr;
12861 }
12862 else
12863 {
12864 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12865 uint32_t u32EffAddr;
12866
12867 /* Handle the disp32 form with no registers first. */
12868 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12869 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12870 else
12871 {
12872 /* Get the register (or SIB) value. */
12873 switch ((bRm & X86_MODRM_RM_MASK))
12874 {
12875 case 0: u32EffAddr = pCtx->eax; break;
12876 case 1: u32EffAddr = pCtx->ecx; break;
12877 case 2: u32EffAddr = pCtx->edx; break;
12878 case 3: u32EffAddr = pCtx->ebx; break;
12879 case 4: /* SIB */
12880 {
12881 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12882
12883 /* Get the index and scale it. */
12884 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12885 {
12886 case 0: u32EffAddr = pCtx->eax; break;
12887 case 1: u32EffAddr = pCtx->ecx; break;
12888 case 2: u32EffAddr = pCtx->edx; break;
12889 case 3: u32EffAddr = pCtx->ebx; break;
12890 case 4: u32EffAddr = 0; /*none */ break;
12891 case 5: u32EffAddr = pCtx->ebp; break;
12892 case 6: u32EffAddr = pCtx->esi; break;
12893 case 7: u32EffAddr = pCtx->edi; break;
12894 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12895 }
12896 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12897
12898 /* add base */
12899 switch (bSib & X86_SIB_BASE_MASK)
12900 {
12901 case 0: u32EffAddr += pCtx->eax; break;
12902 case 1: u32EffAddr += pCtx->ecx; break;
12903 case 2: u32EffAddr += pCtx->edx; break;
12904 case 3: u32EffAddr += pCtx->ebx; break;
12905 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12906 case 5:
12907 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12908 {
12909 u32EffAddr += pCtx->ebp;
12910 SET_SS_DEF();
12911 }
12912 else
12913 {
12914 uint32_t u32Disp;
12915 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12916 u32EffAddr += u32Disp;
12917 }
12918 break;
12919 case 6: u32EffAddr += pCtx->esi; break;
12920 case 7: u32EffAddr += pCtx->edi; break;
12921 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12922 }
12923 break;
12924 }
12925 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12926 case 6: u32EffAddr = pCtx->esi; break;
12927 case 7: u32EffAddr = pCtx->edi; break;
12928 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12929 }
12930
12931 /* Get and add the displacement. */
12932 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12933 {
12934 case 0:
12935 break;
12936 case 1:
12937 {
12938 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12939 u32EffAddr += i8Disp;
12940 break;
12941 }
12942 case 2:
12943 {
12944 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12945 u32EffAddr += u32Disp;
12946 break;
12947 }
12948 default:
12949 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12950 }
12951
12952 }
12953 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12954 *pGCPtrEff = u32EffAddr;
12955 else
12956 {
12957 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12958 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12959 }
12960 }
12961 }
12962 else
12963 {
12964 uint64_t u64EffAddr;
12965
12966 /* Handle the rip+disp32 form with no registers first. */
12967 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12968 {
12969 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12970 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12971 }
12972 else
12973 {
12974 /* Get the register (or SIB) value. */
12975 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12976 {
12977 case 0: u64EffAddr = pCtx->rax; break;
12978 case 1: u64EffAddr = pCtx->rcx; break;
12979 case 2: u64EffAddr = pCtx->rdx; break;
12980 case 3: u64EffAddr = pCtx->rbx; break;
12981 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12982 case 6: u64EffAddr = pCtx->rsi; break;
12983 case 7: u64EffAddr = pCtx->rdi; break;
12984 case 8: u64EffAddr = pCtx->r8; break;
12985 case 9: u64EffAddr = pCtx->r9; break;
12986 case 10: u64EffAddr = pCtx->r10; break;
12987 case 11: u64EffAddr = pCtx->r11; break;
12988 case 13: u64EffAddr = pCtx->r13; break;
12989 case 14: u64EffAddr = pCtx->r14; break;
12990 case 15: u64EffAddr = pCtx->r15; break;
12991 /* SIB */
12992 case 4:
12993 case 12:
12994 {
12995 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12996
12997 /* Get the index and scale it. */
12998 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12999 {
13000 case 0: u64EffAddr = pCtx->rax; break;
13001 case 1: u64EffAddr = pCtx->rcx; break;
13002 case 2: u64EffAddr = pCtx->rdx; break;
13003 case 3: u64EffAddr = pCtx->rbx; break;
13004 case 4: u64EffAddr = 0; /*none */ break;
13005 case 5: u64EffAddr = pCtx->rbp; break;
13006 case 6: u64EffAddr = pCtx->rsi; break;
13007 case 7: u64EffAddr = pCtx->rdi; break;
13008 case 8: u64EffAddr = pCtx->r8; break;
13009 case 9: u64EffAddr = pCtx->r9; break;
13010 case 10: u64EffAddr = pCtx->r10; break;
13011 case 11: u64EffAddr = pCtx->r11; break;
13012 case 12: u64EffAddr = pCtx->r12; break;
13013 case 13: u64EffAddr = pCtx->r13; break;
13014 case 14: u64EffAddr = pCtx->r14; break;
13015 case 15: u64EffAddr = pCtx->r15; break;
13016 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13017 }
13018 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13019
13020 /* add base */
13021 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13022 {
13023 case 0: u64EffAddr += pCtx->rax; break;
13024 case 1: u64EffAddr += pCtx->rcx; break;
13025 case 2: u64EffAddr += pCtx->rdx; break;
13026 case 3: u64EffAddr += pCtx->rbx; break;
13027 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13028 case 6: u64EffAddr += pCtx->rsi; break;
13029 case 7: u64EffAddr += pCtx->rdi; break;
13030 case 8: u64EffAddr += pCtx->r8; break;
13031 case 9: u64EffAddr += pCtx->r9; break;
13032 case 10: u64EffAddr += pCtx->r10; break;
13033 case 11: u64EffAddr += pCtx->r11; break;
13034 case 12: u64EffAddr += pCtx->r12; break;
13035 case 14: u64EffAddr += pCtx->r14; break;
13036 case 15: u64EffAddr += pCtx->r15; break;
13037 /* complicated encodings */
13038 case 5:
13039 case 13:
13040 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13041 {
13042 if (!pVCpu->iem.s.uRexB)
13043 {
13044 u64EffAddr += pCtx->rbp;
13045 SET_SS_DEF();
13046 }
13047 else
13048 u64EffAddr += pCtx->r13;
13049 }
13050 else
13051 {
13052 uint32_t u32Disp;
13053 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13054 u64EffAddr += (int32_t)u32Disp;
13055 }
13056 break;
13057 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13058 }
13059 break;
13060 }
13061 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13062 }
13063
13064 /* Get and add the displacement. */
13065 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13066 {
13067 case 0:
13068 break;
13069 case 1:
13070 {
13071 int8_t i8Disp;
13072 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13073 u64EffAddr += i8Disp;
13074 break;
13075 }
13076 case 2:
13077 {
13078 uint32_t u32Disp;
13079 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13080 u64EffAddr += (int32_t)u32Disp;
13081 break;
13082 }
13083 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13084 }
13085
13086 }
13087
13088 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13089 *pGCPtrEff = u64EffAddr;
13090 else
13091 {
13092 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13093 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13094 }
13095 }
13096
13097 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13098 return VINF_SUCCESS;
13099}
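
/* Two worked examples of the above (comment only):
 *  - 16-bit addressing, bRm=0x46 (mod=1, reg=0, rm=6): a disp8 byte follows and the
 *    effective address is BP + disp8, with SS becoming the default segment (SET_SS_DEF).
 *  - 64-bit addressing, bRm=0x05 (mod=0, rm=5): a disp32 follows and is added to the
 *    RIP of the next instruction, which is why IEM_GET_INSTR_LEN and cbImm (the size
 *    of any immediate still to be decoded) are added above.
 */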
13100
13101
13102/**
13103 * Calculates the effective address of a ModR/M memory operand.
13104 *
13105 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13106 *
13107 * @return Strict VBox status code.
13108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13109 * @param bRm The ModRM byte.
13110 * @param cbImm The size of any immediate following the
13111 * effective address opcode bytes. Important for
13112 * RIP relative addressing.
13113 * @param pGCPtrEff Where to return the effective address.
13114 * @param offRsp RSP displacement.
13115 */
13116IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13117{
13118    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13119 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13120# define SET_SS_DEF() \
13121 do \
13122 { \
13123 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13124 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13125 } while (0)
13126
13127 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13128 {
13129/** @todo Check the effective address size crap! */
13130 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13131 {
13132 uint16_t u16EffAddr;
13133
13134 /* Handle the disp16 form with no registers first. */
13135 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13136 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13137 else
13138 {
13139                /* Get the displacement. */
13140 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13141 {
13142 case 0: u16EffAddr = 0; break;
13143 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13144 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13145 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13146 }
13147
13148 /* Add the base and index registers to the disp. */
13149 switch (bRm & X86_MODRM_RM_MASK)
13150 {
13151 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13152 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13153 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13154 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13155 case 4: u16EffAddr += pCtx->si; break;
13156 case 5: u16EffAddr += pCtx->di; break;
13157 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13158 case 7: u16EffAddr += pCtx->bx; break;
13159 }
13160 }
13161
13162 *pGCPtrEff = u16EffAddr;
13163 }
13164 else
13165 {
13166 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13167 uint32_t u32EffAddr;
13168
13169 /* Handle the disp32 form with no registers first. */
13170 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13171 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13172 else
13173 {
13174 /* Get the register (or SIB) value. */
13175 switch ((bRm & X86_MODRM_RM_MASK))
13176 {
13177 case 0: u32EffAddr = pCtx->eax; break;
13178 case 1: u32EffAddr = pCtx->ecx; break;
13179 case 2: u32EffAddr = pCtx->edx; break;
13180 case 3: u32EffAddr = pCtx->ebx; break;
13181 case 4: /* SIB */
13182 {
13183 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13184
13185 /* Get the index and scale it. */
13186 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13187 {
13188 case 0: u32EffAddr = pCtx->eax; break;
13189 case 1: u32EffAddr = pCtx->ecx; break;
13190 case 2: u32EffAddr = pCtx->edx; break;
13191 case 3: u32EffAddr = pCtx->ebx; break;
13192 case 4: u32EffAddr = 0; /*none */ break;
13193 case 5: u32EffAddr = pCtx->ebp; break;
13194 case 6: u32EffAddr = pCtx->esi; break;
13195 case 7: u32EffAddr = pCtx->edi; break;
13196 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13197 }
13198 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13199
13200 /* add base */
13201 switch (bSib & X86_SIB_BASE_MASK)
13202 {
13203 case 0: u32EffAddr += pCtx->eax; break;
13204 case 1: u32EffAddr += pCtx->ecx; break;
13205 case 2: u32EffAddr += pCtx->edx; break;
13206 case 3: u32EffAddr += pCtx->ebx; break;
13207 case 4:
13208 u32EffAddr += pCtx->esp + offRsp;
13209 SET_SS_DEF();
13210 break;
13211 case 5:
13212 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13213 {
13214 u32EffAddr += pCtx->ebp;
13215 SET_SS_DEF();
13216 }
13217 else
13218 {
13219 uint32_t u32Disp;
13220 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13221 u32EffAddr += u32Disp;
13222 }
13223 break;
13224 case 6: u32EffAddr += pCtx->esi; break;
13225 case 7: u32EffAddr += pCtx->edi; break;
13226 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13227 }
13228 break;
13229 }
13230 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13231 case 6: u32EffAddr = pCtx->esi; break;
13232 case 7: u32EffAddr = pCtx->edi; break;
13233 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13234 }
13235
13236 /* Get and add the displacement. */
13237 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13238 {
13239 case 0:
13240 break;
13241 case 1:
13242 {
13243 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13244 u32EffAddr += i8Disp;
13245 break;
13246 }
13247 case 2:
13248 {
13249 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13250 u32EffAddr += u32Disp;
13251 break;
13252 }
13253 default:
13254 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13255 }
13256
13257 }
13258 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13259 *pGCPtrEff = u32EffAddr;
13260 else
13261 {
13262 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13263 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13264 }
13265 }
13266 }
13267 else
13268 {
13269 uint64_t u64EffAddr;
13270
13271 /* Handle the rip+disp32 form with no registers first. */
13272 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13273 {
13274 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13275 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13276 }
13277 else
13278 {
13279 /* Get the register (or SIB) value. */
13280 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13281 {
13282 case 0: u64EffAddr = pCtx->rax; break;
13283 case 1: u64EffAddr = pCtx->rcx; break;
13284 case 2: u64EffAddr = pCtx->rdx; break;
13285 case 3: u64EffAddr = pCtx->rbx; break;
13286 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13287 case 6: u64EffAddr = pCtx->rsi; break;
13288 case 7: u64EffAddr = pCtx->rdi; break;
13289 case 8: u64EffAddr = pCtx->r8; break;
13290 case 9: u64EffAddr = pCtx->r9; break;
13291 case 10: u64EffAddr = pCtx->r10; break;
13292 case 11: u64EffAddr = pCtx->r11; break;
13293 case 13: u64EffAddr = pCtx->r13; break;
13294 case 14: u64EffAddr = pCtx->r14; break;
13295 case 15: u64EffAddr = pCtx->r15; break;
13296 /* SIB */
13297 case 4:
13298 case 12:
13299 {
13300 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13301
13302 /* Get the index and scale it. */
13303 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13304 {
13305 case 0: u64EffAddr = pCtx->rax; break;
13306 case 1: u64EffAddr = pCtx->rcx; break;
13307 case 2: u64EffAddr = pCtx->rdx; break;
13308 case 3: u64EffAddr = pCtx->rbx; break;
13309 case 4: u64EffAddr = 0; /*none */ break;
13310 case 5: u64EffAddr = pCtx->rbp; break;
13311 case 6: u64EffAddr = pCtx->rsi; break;
13312 case 7: u64EffAddr = pCtx->rdi; break;
13313 case 8: u64EffAddr = pCtx->r8; break;
13314 case 9: u64EffAddr = pCtx->r9; break;
13315 case 10: u64EffAddr = pCtx->r10; break;
13316 case 11: u64EffAddr = pCtx->r11; break;
13317 case 12: u64EffAddr = pCtx->r12; break;
13318 case 13: u64EffAddr = pCtx->r13; break;
13319 case 14: u64EffAddr = pCtx->r14; break;
13320 case 15: u64EffAddr = pCtx->r15; break;
13321 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13322 }
13323 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13324
13325 /* add base */
13326 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13327 {
13328 case 0: u64EffAddr += pCtx->rax; break;
13329 case 1: u64EffAddr += pCtx->rcx; break;
13330 case 2: u64EffAddr += pCtx->rdx; break;
13331 case 3: u64EffAddr += pCtx->rbx; break;
13332 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
13333 case 6: u64EffAddr += pCtx->rsi; break;
13334 case 7: u64EffAddr += pCtx->rdi; break;
13335 case 8: u64EffAddr += pCtx->r8; break;
13336 case 9: u64EffAddr += pCtx->r9; break;
13337 case 10: u64EffAddr += pCtx->r10; break;
13338 case 11: u64EffAddr += pCtx->r11; break;
13339 case 12: u64EffAddr += pCtx->r12; break;
13340 case 14: u64EffAddr += pCtx->r14; break;
13341 case 15: u64EffAddr += pCtx->r15; break;
13342 /* complicated encodings */
13343 case 5:
13344 case 13:
13345 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13346 {
13347 if (!pVCpu->iem.s.uRexB)
13348 {
13349 u64EffAddr += pCtx->rbp;
13350 SET_SS_DEF();
13351 }
13352 else
13353 u64EffAddr += pCtx->r13;
13354 }
13355 else
13356 {
13357 uint32_t u32Disp;
13358 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13359 u64EffAddr += (int32_t)u32Disp;
13360 }
13361 break;
13362 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13363 }
13364 break;
13365 }
13366 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13367 }
13368
13369 /* Get and add the displacement. */
13370 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13371 {
13372 case 0:
13373 break;
13374 case 1:
13375 {
13376 int8_t i8Disp;
13377 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13378 u64EffAddr += i8Disp;
13379 break;
13380 }
13381 case 2:
13382 {
13383 uint32_t u32Disp;
13384 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13385 u64EffAddr += (int32_t)u32Disp;
13386 break;
13387 }
13388 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13389 }
13390
13391 }
13392
13393 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13394 *pGCPtrEff = u64EffAddr;
13395 else
13396 {
13397 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13398 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13399 }
13400 }
13401
13402 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13403 return VINF_SUCCESS;
13404}
13405
13406
13407#ifdef IEM_WITH_SETJMP
13408/**
13409 * Calculates the effective address of a ModR/M memory operand.
13410 *
13411 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13412 *
13413 * May longjmp on internal error.
13414 *
13415 * @return The effective address.
13416 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13417 * @param bRm The ModRM byte.
13418 * @param cbImm The size of any immediate following the
13419 * effective address opcode bytes. Important for
13420 * RIP relative addressing.
13421 */
13422IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13423{
13424 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13425 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13426# define SET_SS_DEF() \
13427 do \
13428 { \
13429 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13430 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13431 } while (0)
13432
13433 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13434 {
13435/** @todo Check the effective address size crap! */
13436 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13437 {
13438 uint16_t u16EffAddr;
13439
13440 /* Handle the disp16 form with no registers first. */
13441 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13442 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13443 else
13444 {
13445 /* Get the displacement. */
13446 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13447 {
13448 case 0: u16EffAddr = 0; break;
13449 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13450 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13451 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13452 }
13453
13454 /* Add the base and index registers to the disp. */
13455 switch (bRm & X86_MODRM_RM_MASK)
13456 {
13457 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13458 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13459 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13460 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13461 case 4: u16EffAddr += pCtx->si; break;
13462 case 5: u16EffAddr += pCtx->di; break;
13463 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13464 case 7: u16EffAddr += pCtx->bx; break;
13465 }
13466 }
13467
13468 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13469 return u16EffAddr;
13470 }
13471
13472 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13473 uint32_t u32EffAddr;
13474
13475 /* Handle the disp32 form with no registers first. */
13476 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13477 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13478 else
13479 {
13480 /* Get the register (or SIB) value. */
13481 switch ((bRm & X86_MODRM_RM_MASK))
13482 {
13483 case 0: u32EffAddr = pCtx->eax; break;
13484 case 1: u32EffAddr = pCtx->ecx; break;
13485 case 2: u32EffAddr = pCtx->edx; break;
13486 case 3: u32EffAddr = pCtx->ebx; break;
13487 case 4: /* SIB */
13488 {
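 /* Example: bRm=0x44 (mod=01, rm=100) followed by bSib=0x98 decodes to [eax + ebx*4 + disp8]. */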
13489 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13490
13491 /* Get the index and scale it. */
13492 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13493 {
13494 case 0: u32EffAddr = pCtx->eax; break;
13495 case 1: u32EffAddr = pCtx->ecx; break;
13496 case 2: u32EffAddr = pCtx->edx; break;
13497 case 3: u32EffAddr = pCtx->ebx; break;
13498 case 4: u32EffAddr = 0; /*none */ break;
13499 case 5: u32EffAddr = pCtx->ebp; break;
13500 case 6: u32EffAddr = pCtx->esi; break;
13501 case 7: u32EffAddr = pCtx->edi; break;
13502 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13503 }
13504 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13505
13506 /* add base */
13507 switch (bSib & X86_SIB_BASE_MASK)
13508 {
13509 case 0: u32EffAddr += pCtx->eax; break;
13510 case 1: u32EffAddr += pCtx->ecx; break;
13511 case 2: u32EffAddr += pCtx->edx; break;
13512 case 3: u32EffAddr += pCtx->ebx; break;
13513 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
13514 case 5:
13515 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13516 {
13517 u32EffAddr += pCtx->ebp;
13518 SET_SS_DEF();
13519 }
13520 else
13521 {
13522 uint32_t u32Disp;
13523 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13524 u32EffAddr += u32Disp;
13525 }
13526 break;
13527 case 6: u32EffAddr += pCtx->esi; break;
13528 case 7: u32EffAddr += pCtx->edi; break;
13529 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13530 }
13531 break;
13532 }
13533 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13534 case 6: u32EffAddr = pCtx->esi; break;
13535 case 7: u32EffAddr = pCtx->edi; break;
13536 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13537 }
13538
13539 /* Get and add the displacement. */
13540 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13541 {
13542 case 0:
13543 break;
13544 case 1:
13545 {
13546 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13547 u32EffAddr += i8Disp;
13548 break;
13549 }
13550 case 2:
13551 {
13552 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13553 u32EffAddr += u32Disp;
13554 break;
13555 }
13556 default:
13557 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13558 }
13559 }
13560
13561 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13562 {
13563 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13564 return u32EffAddr;
13565 }
13566 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13567 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13568 return u32EffAddr & UINT16_MAX;
13569 }
13570
13571 uint64_t u64EffAddr;
13572
13573 /* Handle the rip+disp32 form with no registers first. */
13574 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13575 {
13576 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
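 /* RIP-relative addressing is relative to the start of the next instruction, so account for the opcode bytes read so far plus any trailing immediate (cbImm). */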
13577 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13578 }
13579 else
13580 {
13581 /* Get the register (or SIB) value. */
13582 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13583 {
13584 case 0: u64EffAddr = pCtx->rax; break;
13585 case 1: u64EffAddr = pCtx->rcx; break;
13586 case 2: u64EffAddr = pCtx->rdx; break;
13587 case 3: u64EffAddr = pCtx->rbx; break;
13588 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13589 case 6: u64EffAddr = pCtx->rsi; break;
13590 case 7: u64EffAddr = pCtx->rdi; break;
13591 case 8: u64EffAddr = pCtx->r8; break;
13592 case 9: u64EffAddr = pCtx->r9; break;
13593 case 10: u64EffAddr = pCtx->r10; break;
13594 case 11: u64EffAddr = pCtx->r11; break;
13595 case 13: u64EffAddr = pCtx->r13; break;
13596 case 14: u64EffAddr = pCtx->r14; break;
13597 case 15: u64EffAddr = pCtx->r15; break;
13598 /* SIB */
13599 case 4:
13600 case 12:
13601 {
13602 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13603
13604 /* Get the index and scale it. */
13605 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13606 {
13607 case 0: u64EffAddr = pCtx->rax; break;
13608 case 1: u64EffAddr = pCtx->rcx; break;
13609 case 2: u64EffAddr = pCtx->rdx; break;
13610 case 3: u64EffAddr = pCtx->rbx; break;
13611 case 4: u64EffAddr = 0; /*none */ break;
13612 case 5: u64EffAddr = pCtx->rbp; break;
13613 case 6: u64EffAddr = pCtx->rsi; break;
13614 case 7: u64EffAddr = pCtx->rdi; break;
13615 case 8: u64EffAddr = pCtx->r8; break;
13616 case 9: u64EffAddr = pCtx->r9; break;
13617 case 10: u64EffAddr = pCtx->r10; break;
13618 case 11: u64EffAddr = pCtx->r11; break;
13619 case 12: u64EffAddr = pCtx->r12; break;
13620 case 13: u64EffAddr = pCtx->r13; break;
13621 case 14: u64EffAddr = pCtx->r14; break;
13622 case 15: u64EffAddr = pCtx->r15; break;
13623 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13624 }
13625 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13626
13627 /* add base */
13628 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13629 {
13630 case 0: u64EffAddr += pCtx->rax; break;
13631 case 1: u64EffAddr += pCtx->rcx; break;
13632 case 2: u64EffAddr += pCtx->rdx; break;
13633 case 3: u64EffAddr += pCtx->rbx; break;
13634 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13635 case 6: u64EffAddr += pCtx->rsi; break;
13636 case 7: u64EffAddr += pCtx->rdi; break;
13637 case 8: u64EffAddr += pCtx->r8; break;
13638 case 9: u64EffAddr += pCtx->r9; break;
13639 case 10: u64EffAddr += pCtx->r10; break;
13640 case 11: u64EffAddr += pCtx->r11; break;
13641 case 12: u64EffAddr += pCtx->r12; break;
13642 case 14: u64EffAddr += pCtx->r14; break;
13643 case 15: u64EffAddr += pCtx->r15; break;
13644 /* complicated encodings */
13645 case 5:
13646 case 13:
13647 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13648 {
13649 if (!pVCpu->iem.s.uRexB)
13650 {
13651 u64EffAddr += pCtx->rbp;
13652 SET_SS_DEF();
13653 }
13654 else
13655 u64EffAddr += pCtx->r13;
13656 }
13657 else
13658 {
13659 uint32_t u32Disp;
13660 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13661 u64EffAddr += (int32_t)u32Disp;
13662 }
13663 break;
13664 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13665 }
13666 break;
13667 }
13668 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13669 }
13670
13671 /* Get and add the displacement. */
13672 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13673 {
13674 case 0:
13675 break;
13676 case 1:
13677 {
13678 int8_t i8Disp;
13679 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13680 u64EffAddr += i8Disp;
13681 break;
13682 }
13683 case 2:
13684 {
13685 uint32_t u32Disp;
13686 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13687 u64EffAddr += (int32_t)u32Disp;
13688 break;
13689 }
13690 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13691 }
13692
13693 }
13694
13695 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13696 {
13697 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13698 return u64EffAddr;
13699 }
13700 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13701 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13702 return u64EffAddr & UINT32_MAX;
13703}
13704#endif /* IEM_WITH_SETJMP */
13705
13706
13707/** @} */
13708
13709
13710
13711/*
13712 * Include the instructions
13713 */
13714#include "IEMAllInstructions.cpp.h"
13715
13716
13717
13718
13719#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13720
13721/**
13722 * Sets up execution verification mode.
13723 */
13724IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
13725{
13726
13727 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
13728
13729 /*
13730 * Always note down the address of the current instruction.
13731 */
13732 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
13733 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
13734
13735 /*
13736 * Enable verification and/or logging.
13737 */
13738 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
13739 if ( fNewNoRem
13740 && ( 0
13741#if 0 /* auto enable on first paged protected mode interrupt */
13742 || ( pOrgCtx->eflags.Bits.u1IF
13743 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
13744 && TRPMHasTrap(pVCpu)
13745 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
13746#endif
13747#if 0
13748 || ( pOrgCtx->cs.Sel == 0x10
13749 && ( pOrgCtx->rip == 0x90119e3e
13750 || pOrgCtx->rip == 0x901d9810))
13751#endif
13752#if 0 /* Auto enable DSL - FPU stuff. */
13753 || ( pOrgCtx->cs.Sel == 0x10
13754 && (// pOrgCtx->rip == 0xc02ec07f
13755 //|| pOrgCtx->rip == 0xc02ec082
13756 //|| pOrgCtx->rip == 0xc02ec0c9
13757 0
13758 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
13759#endif
13760#if 0 /* Auto enable DSL - fstp st0 stuff. */
13761 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
13762#endif
13763#if 0
13764 || pOrgCtx->rip == 0x9022bb3a
13765#endif
13766#if 0
13767 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
13768#endif
13769#if 0
13770 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
13771 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
13772#endif
13773#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
13774 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
13775 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
13776 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
13777#endif
13778#if 0 /* NT4SP1 - xadd early boot. */
13779 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
13780#endif
13781#if 0 /* NT4SP1 - wrmsr (intel MSR). */
13782 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
13783#endif
13784#if 0 /* NT4SP1 - cmpxchg (AMD). */
13785 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
13786#endif
13787#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
13788 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
13789#endif
13790#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
13791 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
13792
13793#endif
13794#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
13795 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
13796
13797#endif
13798#if 0 /* NT4SP1 - frstor [ecx] */
13799 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
13800#endif
13801#if 0 /* xxxxxx - All long mode code. */
13802 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
13803#endif
13804#if 0 /* rep movsq linux 3.7 64-bit boot. */
13805 || (pOrgCtx->rip == 0x0000000000100241)
13806#endif
13807#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
13808 || (pOrgCtx->rip == 0x000000000215e240)
13809#endif
13810#if 0 /* DOS's size-overridden iret to v8086. */
13811 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
13812#endif
13813 )
13814 )
13815 {
13816 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
13817 RTLogFlags(NULL, "enabled");
13818 fNewNoRem = false;
13819 }
13820 if (fNewNoRem != pVCpu->iem.s.fNoRem)
13821 {
13822 pVCpu->iem.s.fNoRem = fNewNoRem;
13823 if (!fNewNoRem)
13824 {
13825 LogAlways(("Enabling verification mode!\n"));
13826 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
13827 }
13828 else
13829 LogAlways(("Disabling verification mode!\n"));
13830 }
13831
13832 /*
13833 * Switch state.
13834 */
13835 if (IEM_VERIFICATION_ENABLED(pVCpu))
13836 {
13837 static CPUMCTX s_DebugCtx; /* Ugly! */
13838
13839 s_DebugCtx = *pOrgCtx;
13840 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
13841 }
13842
13843 /*
13844 * See if there is an interrupt pending in TRPM and inject it if we can.
13845 */
13846 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13847 /** @todo Maybe someday we can centralize this under CPUMCanInjectInterrupt()? */
13848#if defined(VBOX_WITH_NESTED_HWVIRT)
13849 bool fIntrEnabled = pOrgCtx->hwvirt.svm.fGif;
13850 if (fIntrEnabled)
13851 {
13852 if (CPUMIsGuestInSvmNestedHwVirtMode(pOrgCtx))
13853 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pOrgCtx);
13854 else
13855 fIntrEnabled = pOrgCtx->eflags.Bits.u1IF;
13856 }
13857#else
13858 bool fIntrEnabled = pOrgCtx->eflags.Bits.u1IF;
13859#endif
13860 if ( fIntrEnabled
13861 && TRPMHasTrap(pVCpu)
13862 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
13863 {
13864 uint8_t u8TrapNo;
13865 TRPMEVENT enmType;
13866 RTGCUINT uErrCode;
13867 RTGCPTR uCr2;
13868 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13869 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13870 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13871 TRPMResetTrap(pVCpu);
13872 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
13873 }
13874
13875 /*
13876 * Reset the counters.
13877 */
13878 pVCpu->iem.s.cIOReads = 0;
13879 pVCpu->iem.s.cIOWrites = 0;
13880 pVCpu->iem.s.fIgnoreRaxRdx = false;
13881 pVCpu->iem.s.fOverlappingMovs = false;
13882 pVCpu->iem.s.fProblematicMemory = false;
13883 pVCpu->iem.s.fUndefinedEFlags = 0;
13884
13885 if (IEM_VERIFICATION_ENABLED(pVCpu))
13886 {
13887 /*
13888 * Free all verification records.
13889 */
13890 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
13891 pVCpu->iem.s.pIemEvtRecHead = NULL;
13892 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
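 /* Recycle the IEM records first, then those collected from the other execution engine. */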
13893 do
13894 {
13895 while (pEvtRec)
13896 {
13897 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
13898 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
13899 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
13900 pEvtRec = pNext;
13901 }
13902 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
13903 pVCpu->iem.s.pOtherEvtRecHead = NULL;
13904 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
13905 } while (pEvtRec);
13906 }
13907}
13908
13909
13910/**
13911 * Allocate an event record.
13912 * @returns Pointer to a record.
13913 */
13914IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
13915{
13916 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13917 return NULL;
13918
13919 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
13920 if (pEvtRec)
13921 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
13922 else
13923 {
13924 if (!pVCpu->iem.s.ppIemEvtRecNext)
13925 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
13926
13927 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
13928 if (!pEvtRec)
13929 return NULL;
13930 }
13931 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
13932 pEvtRec->pNext = NULL;
13933 return pEvtRec;
13934}
13935
13936
13937/**
13938 * IOMMMIORead notification.
13939 */
13940VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
13941{
13942 PVMCPU pVCpu = VMMGetCpu(pVM);
13943 if (!pVCpu)
13944 return;
13945 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13946 if (!pEvtRec)
13947 return;
13948 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
13949 pEvtRec->u.RamRead.GCPhys = GCPhys;
13950 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
13951 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13952 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13953}
13954
13955
13956/**
13957 * IOMMMIOWrite notification.
13958 */
13959VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
13960{
13961 PVMCPU pVCpu = VMMGetCpu(pVM);
13962 if (!pVCpu)
13963 return;
13964 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13965 if (!pEvtRec)
13966 return;
13967 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
13968 pEvtRec->u.RamWrite.GCPhys = GCPhys;
13969 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
13970 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
13971 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
13972 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
13973 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
13974 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13975 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13976}
13977
13978
13979/**
13980 * IOMIOPortRead notification.
13981 */
13982VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
13983{
13984 PVMCPU pVCpu = VMMGetCpu(pVM);
13985 if (!pVCpu)
13986 return;
13987 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13988 if (!pEvtRec)
13989 return;
13990 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
13991 pEvtRec->u.IOPortRead.Port = Port;
13992 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
13993 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13994 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13995}
13996
13997/**
13998 * IOMIOPortWrite notification.
13999 */
14000VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14001{
14002 PVMCPU pVCpu = VMMGetCpu(pVM);
14003 if (!pVCpu)
14004 return;
14005 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14006 if (!pEvtRec)
14007 return;
14008 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
14009 pEvtRec->u.IOPortWrite.Port = Port;
14010 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
14011 pEvtRec->u.IOPortWrite.u32Value = u32Value;
14012 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14013 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14014}
14015
14016
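/**
 * IOMIOPortReadString notification.
 */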
14017VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
14018{
14019 PVMCPU pVCpu = VMMGetCpu(pVM);
14020 if (!pVCpu)
14021 return;
14022 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14023 if (!pEvtRec)
14024 return;
14025 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
14026 pEvtRec->u.IOPortStrRead.Port = Port;
14027 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
14028 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
14029 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14030 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14031}
14032
14033
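/**
 * IOMIOPortWriteString notification.
 */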
14034VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
14035{
14036 PVMCPU pVCpu = VMMGetCpu(pVM);
14037 if (!pVCpu)
14038 return;
14039 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14040 if (!pEvtRec)
14041 return;
14042 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
14043 pEvtRec->u.IOPortStrWrite.Port = Port;
14044 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
14045 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
14046 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14047 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14048}
14049
14050
14051/**
14052 * Fakes and records an I/O port read.
14053 *
14054 * @returns VINF_SUCCESS.
14055 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14056 * @param Port The I/O port.
14057 * @param pu32Value Where to store the fake value.
14058 * @param cbValue The size of the access.
14059 */
14060IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14061{
14062 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14063 if (pEvtRec)
14064 {
14065 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
14066 pEvtRec->u.IOPortRead.Port = Port;
14067 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
14068 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
14069 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
14070 }
14071 pVCpu->iem.s.cIOReads++;
14072 *pu32Value = 0xcccccccc;
14073 return VINF_SUCCESS;
14074}
14075
14076
14077/**
14078 * Fakes and records an I/O port write.
14079 *
14080 * @returns VINF_SUCCESS.
14081 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14082 * @param Port The I/O port.
14083 * @param u32Value The value being written.
14084 * @param cbValue The size of the access.
14085 */
14086IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14087{
14088 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14089 if (pEvtRec)
14090 {
14091 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
14092 pEvtRec->u.IOPortWrite.Port = Port;
14093 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
14094 pEvtRec->u.IOPortWrite.u32Value = u32Value;
14095 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
14096 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
14097 }
14098 pVCpu->iem.s.cIOWrites++;
14099 return VINF_SUCCESS;
14100}
14101
14102
14103/**
14104 * Used by the verification assertions to add CPU state and disassembly details.
14105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14106 */
14107IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
14108{
14109 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14110 PVM pVM = pVCpu->CTX_SUFF(pVM);
14111
14112 char szRegs[4096];
14113 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
14114 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
14115 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
14116 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
14117 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
14118 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
14119 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
14120 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
14121 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
14122 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
14123 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
14124 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
14125 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
14126 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
14127 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
14128 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
14129 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
14130 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
14131 " efer=%016VR{efer}\n"
14132 " pat=%016VR{pat}\n"
14133 " sf_mask=%016VR{sf_mask}\n"
14134 "krnl_gs_base=%016VR{krnl_gs_base}\n"
14135 " lstar=%016VR{lstar}\n"
14136 " star=%016VR{star} cstar=%016VR{cstar}\n"
14137 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
14138 );
14139
14140 char szInstr1[256];
14141 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
14142 DBGF_DISAS_FLAGS_DEFAULT_MODE,
14143 szInstr1, sizeof(szInstr1), NULL);
14144 char szInstr2[256];
14145 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
14146 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14147 szInstr2, sizeof(szInstr2), NULL);
14148
14149 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
14150}
14151
14152
14153/**
14154 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
14155 * dump to the assertion info.
14156 *
14157 * @param pEvtRec The record to dump.
14158 */
14159IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
14160{
14161 switch (pEvtRec->enmEvent)
14162 {
14163 case IEMVERIFYEVENT_IOPORT_READ:
14164 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
14165 pEvtRec->u.IOPortRead.Port,
14166 pEvtRec->u.IOPortRead.cbValue);
14167 break;
14168 case IEMVERIFYEVENT_IOPORT_WRITE:
14169 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
14170 pEvtRec->u.IOPortWrite.Port,
14171 pEvtRec->u.IOPortWrite.cbValue,
14172 pEvtRec->u.IOPortWrite.u32Value);
14173 break;
14174 case IEMVERIFYEVENT_IOPORT_STR_READ:
14175 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
14176 pEvtRec->u.IOPortStrRead.Port,
14177 pEvtRec->u.IOPortStrRead.cbValue,
14178 pEvtRec->u.IOPortStrRead.cTransfers);
14179 break;
14180 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14181 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
14182 pEvtRec->u.IOPortStrWrite.Port,
14183 pEvtRec->u.IOPortStrWrite.cbValue,
14184 pEvtRec->u.IOPortStrWrite.cTransfers);
14185 break;
14186 case IEMVERIFYEVENT_RAM_READ:
14187 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
14188 pEvtRec->u.RamRead.GCPhys,
14189 pEvtRec->u.RamRead.cb);
14190 break;
14191 case IEMVERIFYEVENT_RAM_WRITE:
14192 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
14193 pEvtRec->u.RamWrite.GCPhys,
14194 pEvtRec->u.RamWrite.cb,
14195 (int)pEvtRec->u.RamWrite.cb,
14196 pEvtRec->u.RamWrite.ab);
14197 break;
14198 default:
14199 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
14200 break;
14201 }
14202}
14203
14204
14205/**
14206 * Raises an assertion on the specified records, showing the given message with
14207 * dumps of both records attached.
14208 *
14209 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14210 * @param pEvtRec1 The first record.
14211 * @param pEvtRec2 The second record.
14212 * @param pszMsg The message explaining why we're asserting.
14213 */
14214IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
14215{
14216 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14217 iemVerifyAssertAddRecordDump(pEvtRec1);
14218 iemVerifyAssertAddRecordDump(pEvtRec2);
14219 iemVerifyAssertMsg2(pVCpu);
14220 RTAssertPanic();
14221}
14222
14223
14224/**
14225 * Raises an assertion on the specified record, showing the given message with
14226 * a record dump attached.
14227 *
14228 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14229 * @param pEvtRec The record.
14230 * @param pszMsg The message explaining why we're asserting.
14231 */
14232IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
14233{
14234 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14235 iemVerifyAssertAddRecordDump(pEvtRec);
14236 iemVerifyAssertMsg2(pVCpu);
14237 RTAssertPanic();
14238}
14239
14240
14241/**
14242 * Verifies a write record.
14243 *
14244 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14245 * @param pEvtRec The write record.
14246 * @param fRem Set if REM did the other execution. If clear
14247 * it was HM.
14248 */
14249IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
14250{
14251 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
14252 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
14253 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
14254 if ( RT_FAILURE(rc)
14255 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
14256 {
14257 /* fend off ins */
14258 if ( !pVCpu->iem.s.cIOReads
14259 || pEvtRec->u.RamWrite.ab[0] != 0xcc
14260 || ( pEvtRec->u.RamWrite.cb != 1
14261 && pEvtRec->u.RamWrite.cb != 2
14262 && pEvtRec->u.RamWrite.cb != 4) )
14263 {
14264 /* fend off ROMs and MMIO */
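 /* (the 0xa0000-0xfffff VGA/ROM window and the 0xfffc0000+ flash/BIOS mapping are skipped) */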
14265 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
14266 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
14267 {
14268 /* fend off fxsave */
14269 if (pEvtRec->u.RamWrite.cb != 512)
14270 {
14271 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
14272 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14273 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
14274 RTAssertMsg2Add("%s: %.*Rhxs\n"
14275 "iem: %.*Rhxs\n",
14276 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
14277 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
14278 iemVerifyAssertAddRecordDump(pEvtRec);
14279 iemVerifyAssertMsg2(pVCpu);
14280 RTAssertPanic();
14281 }
14282 }
14283 }
14284 }
14285
14286}
14287
14288/**
14289 * Performs the post-execution verification checks.
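 * @returns rcStrictIem, possibly adjusted by the verification logic.
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param rcStrictIem The status code returned by the instruction execution.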
14290 */
14291IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
14292{
14293 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14294 return rcStrictIem;
14295
14296 /*
14297 * Switch back the state.
14298 */
14299 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
14300 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
14301 Assert(pOrgCtx != pDebugCtx);
14302 IEM_GET_CTX(pVCpu) = pOrgCtx;
14303
14304 /*
14305 * Execute the instruction in REM.
14306 */
14307 bool fRem = false;
14308 PVM pVM = pVCpu->CTX_SUFF(pVM);
14309
14310 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
14311#ifdef IEM_VERIFICATION_MODE_FULL_HM
14312 if ( HMIsEnabled(pVM)
14313 && pVCpu->iem.s.cIOReads == 0
14314 && pVCpu->iem.s.cIOWrites == 0
14315 && !pVCpu->iem.s.fProblematicMemory)
14316 {
14317 uint64_t uStartRip = pOrgCtx->rip;
14318 unsigned iLoops = 0;
14319 do
14320 {
14321 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
14322 iLoops++;
14323 } while ( rc == VINF_SUCCESS
14324 || ( rc == VINF_EM_DBG_STEPPED
14325 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14326 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
14327 || ( pOrgCtx->rip != pDebugCtx->rip
14328 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
14329 && iLoops < 8) );
14330 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
14331 rc = VINF_SUCCESS;
14332 }
14333#endif
14334 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
14335 || rc == VINF_IOM_R3_IOPORT_READ
14336 || rc == VINF_IOM_R3_IOPORT_WRITE
14337 || rc == VINF_IOM_R3_MMIO_READ
14338 || rc == VINF_IOM_R3_MMIO_READ_WRITE
14339 || rc == VINF_IOM_R3_MMIO_WRITE
14340 || rc == VINF_CPUM_R3_MSR_READ
14341 || rc == VINF_CPUM_R3_MSR_WRITE
14342 || rc == VINF_EM_RESCHEDULE
14343 )
14344 {
14345 EMRemLock(pVM);
14346 rc = REMR3EmulateInstruction(pVM, pVCpu);
14347 AssertRC(rc);
14348 EMRemUnlock(pVM);
14349 fRem = true;
14350 }
14351
14352# if 1 /* Skip unimplemented instructions for now. */
14353 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14354 {
14355 IEM_GET_CTX(pVCpu) = pOrgCtx;
14356 if (rc == VINF_EM_DBG_STEPPED)
14357 return VINF_SUCCESS;
14358 return rc;
14359 }
14360# endif
14361
14362 /*
14363 * Compare the register states.
14364 */
14365 unsigned cDiffs = 0;
14366 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
14367 {
14368 //Log(("REM and IEM ends up with different registers!\n"));
14369 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
14370
14371# define CHECK_FIELD(a_Field) \
14372 do \
14373 { \
14374 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14375 { \
14376 switch (sizeof(pOrgCtx->a_Field)) \
14377 { \
14378 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14379 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14380 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14381 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14382 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14383 } \
14384 cDiffs++; \
14385 } \
14386 } while (0)
14387# define CHECK_XSTATE_FIELD(a_Field) \
14388 do \
14389 { \
14390 if (pOrgXState->a_Field != pDebugXState->a_Field) \
14391 { \
14392 switch (sizeof(pOrgXState->a_Field)) \
14393 { \
14394 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14395 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14396 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14397 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14398 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14399 } \
14400 cDiffs++; \
14401 } \
14402 } while (0)
14403
14404# define CHECK_BIT_FIELD(a_Field) \
14405 do \
14406 { \
14407 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14408 { \
14409 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
14410 cDiffs++; \
14411 } \
14412 } while (0)
14413
14414# define CHECK_SEL(a_Sel) \
14415 do \
14416 { \
14417 CHECK_FIELD(a_Sel.Sel); \
14418 CHECK_FIELD(a_Sel.Attr.u); \
14419 CHECK_FIELD(a_Sel.u64Base); \
14420 CHECK_FIELD(a_Sel.u32Limit); \
14421 CHECK_FIELD(a_Sel.fFlags); \
14422 } while (0)
14423
14424 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
14425 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
14426
14427#if 1 /* The recompiler doesn't update these the intel way. */
14428 if (fRem)
14429 {
14430 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
14431 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
14432 pOrgXState->x87.CS = pDebugXState->x87.CS;
14433 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
14434 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
14435 pOrgXState->x87.DS = pDebugXState->x87.DS;
14436 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
14437 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
14438 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
14439 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
14440 }
14441#endif
14442 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
14443 {
14444 RTAssertMsg2Weak(" the FPU state differs\n");
14445 cDiffs++;
14446 CHECK_XSTATE_FIELD(x87.FCW);
14447 CHECK_XSTATE_FIELD(x87.FSW);
14448 CHECK_XSTATE_FIELD(x87.FTW);
14449 CHECK_XSTATE_FIELD(x87.FOP);
14450 CHECK_XSTATE_FIELD(x87.FPUIP);
14451 CHECK_XSTATE_FIELD(x87.CS);
14452 CHECK_XSTATE_FIELD(x87.Rsrvd1);
14453 CHECK_XSTATE_FIELD(x87.FPUDP);
14454 CHECK_XSTATE_FIELD(x87.DS);
14455 CHECK_XSTATE_FIELD(x87.Rsrvd2);
14456 CHECK_XSTATE_FIELD(x87.MXCSR);
14457 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
14458 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
14459 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
14460 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
14461 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
14462 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
14463 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
14464 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
14465 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
14466 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
14467 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
14468 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
14469 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
14470 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
14471 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
14472 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
14473 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
14474 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
14475 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
14476 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
14477 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
14478 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
14479 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
14480 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
14481 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
14482 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
14483 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
14484 }
14485 CHECK_FIELD(rip);
14486 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
14487 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
14488 {
14489 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
14490 CHECK_BIT_FIELD(rflags.Bits.u1CF);
14491 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
14492 CHECK_BIT_FIELD(rflags.Bits.u1PF);
14493 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
14494 CHECK_BIT_FIELD(rflags.Bits.u1AF);
14495 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
14496 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
14497 CHECK_BIT_FIELD(rflags.Bits.u1SF);
14498 CHECK_BIT_FIELD(rflags.Bits.u1TF);
14499 CHECK_BIT_FIELD(rflags.Bits.u1IF);
14500 CHECK_BIT_FIELD(rflags.Bits.u1DF);
14501 CHECK_BIT_FIELD(rflags.Bits.u1OF);
14502 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
14503 CHECK_BIT_FIELD(rflags.Bits.u1NT);
14504 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
14505 if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
14506 CHECK_BIT_FIELD(rflags.Bits.u1RF);
14507 CHECK_BIT_FIELD(rflags.Bits.u1VM);
14508 CHECK_BIT_FIELD(rflags.Bits.u1AC);
14509 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
14510 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
14511 CHECK_BIT_FIELD(rflags.Bits.u1ID);
14512 }
14513
14514 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
14515 CHECK_FIELD(rax);
14516 CHECK_FIELD(rcx);
14517 if (!pVCpu->iem.s.fIgnoreRaxRdx)
14518 CHECK_FIELD(rdx);
14519 CHECK_FIELD(rbx);
14520 CHECK_FIELD(rsp);
14521 CHECK_FIELD(rbp);
14522 CHECK_FIELD(rsi);
14523 CHECK_FIELD(rdi);
14524 CHECK_FIELD(r8);
14525 CHECK_FIELD(r9);
14526 CHECK_FIELD(r10);
14527 CHECK_FIELD(r11);
14528 CHECK_FIELD(r12);
14529 CHECK_FIELD(r13);
 CHECK_FIELD(r14);
 CHECK_FIELD(r15);
14530 CHECK_SEL(cs);
14531 CHECK_SEL(ss);
14532 CHECK_SEL(ds);
14533 CHECK_SEL(es);
14534 CHECK_SEL(fs);
14535 CHECK_SEL(gs);
14536 CHECK_FIELD(cr0);
14537
14538 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
14539 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
14540 /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access
14541 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
14542 if (pOrgCtx->cr2 != pDebugCtx->cr2)
14543 {
14544 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
14545 { /* ignore */ }
14546 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
14547 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
14548 && fRem)
14549 { /* ignore */ }
14550 else
14551 CHECK_FIELD(cr2);
14552 }
14553 CHECK_FIELD(cr3);
14554 CHECK_FIELD(cr4);
14555 CHECK_FIELD(dr[0]);
14556 CHECK_FIELD(dr[1]);
14557 CHECK_FIELD(dr[2]);
14558 CHECK_FIELD(dr[3]);
14559 CHECK_FIELD(dr[6]);
14560 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
14561 CHECK_FIELD(dr[7]);
14562 CHECK_FIELD(gdtr.cbGdt);
14563 CHECK_FIELD(gdtr.pGdt);
14564 CHECK_FIELD(idtr.cbIdt);
14565 CHECK_FIELD(idtr.pIdt);
14566 CHECK_SEL(ldtr);
14567 CHECK_SEL(tr);
14568 CHECK_FIELD(SysEnter.cs);
14569 CHECK_FIELD(SysEnter.eip);
14570 CHECK_FIELD(SysEnter.esp);
14571 CHECK_FIELD(msrEFER);
14572 CHECK_FIELD(msrSTAR);
14573 CHECK_FIELD(msrPAT);
14574 CHECK_FIELD(msrLSTAR);
14575 CHECK_FIELD(msrCSTAR);
14576 CHECK_FIELD(msrSFMASK);
14577 CHECK_FIELD(msrKERNELGSBASE);
14578
14579 if (cDiffs != 0)
14580 {
14581 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14582 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
14583 RTAssertPanic();
14584 static bool volatile s_fEnterDebugger = true;
14585 if (s_fEnterDebugger)
14586 DBGFSTOP(pVM);
14587
14588# if 1 /* Ignore unimplemented instructions for now. */
14589 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14590 rcStrictIem = VINF_SUCCESS;
14591# endif
14592 }
14593# undef CHECK_FIELD
14594# undef CHECK_BIT_FIELD
14595 }
14596
14597 /*
14598 * If the register state compared fine, check the verification event
14599 * records.
14600 */
14601 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
14602 {
14603 /*
14604 * Compare verification event records.
14605 * - I/O port accesses should be a 1:1 match.
14606 */
14607 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
14608 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
14609 while (pIemRec && pOtherRec)
14610 {
14611 /* Since we might miss RAM writes and reads, ignore reads and check
14612 any written memory, skipping extra IEM-only RAM records. */
14613 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
14614 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
14615 && pIemRec->pNext)
14616 {
14617 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14618 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14619 pIemRec = pIemRec->pNext;
14620 }
14621
14622 /* Do the compare. */
14623 if (pIemRec->enmEvent != pOtherRec->enmEvent)
14624 {
14625 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
14626 break;
14627 }
14628 bool fEquals;
14629 switch (pIemRec->enmEvent)
14630 {
14631 case IEMVERIFYEVENT_IOPORT_READ:
14632 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
14633 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
14634 break;
14635 case IEMVERIFYEVENT_IOPORT_WRITE:
14636 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
14637 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
14638 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
14639 break;
14640 case IEMVERIFYEVENT_IOPORT_STR_READ:
14641 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
14642 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
14643 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
14644 break;
14645 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14646 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
14647 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
14648 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
14649 break;
14650 case IEMVERIFYEVENT_RAM_READ:
14651 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
14652 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
14653 break;
14654 case IEMVERIFYEVENT_RAM_WRITE:
14655 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
14656 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
14657 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
14658 break;
14659 default:
14660 fEquals = false;
14661 break;
14662 }
14663 if (!fEquals)
14664 {
14665 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
14666 break;
14667 }
14668
14669 /* advance */
14670 pIemRec = pIemRec->pNext;
14671 pOtherRec = pOtherRec->pNext;
14672 }
14673
14674 /* Ignore extra writes and reads. */
14675 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
14676 {
14677 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14678 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14679 pIemRec = pIemRec->pNext;
14680 }
14681 if (pIemRec != NULL)
14682 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
14683 else if (pOtherRec != NULL)
14684 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
14685 }
14686 IEM_GET_CTX(pVCpu) = pOrgCtx;
14687
14688 return rcStrictIem;
14689}
14690
14691#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14692
14693/* stubs */
14694IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14695{
14696 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
14697 return VERR_INTERNAL_ERROR;
14698}
14699
14700IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14701{
14702 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
14703 return VERR_INTERNAL_ERROR;
14704}
14705
14706#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14707
14708
14709#ifdef LOG_ENABLED
14710/**
14711 * Logs the current instruction.
14712 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14713 * @param pCtx The current CPU context.
14714 * @param fSameCtx Set if we have the same context information as the VMM,
14715 * clear if we may have already executed an instruction in
14716 * our debug context. When clear, we assume IEMCPU holds
14717 * valid CPU mode info.
14718 */
14719IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
14720{
14721# ifdef IN_RING3
14722 if (LogIs2Enabled())
14723 {
14724 char szInstr[256];
14725 uint32_t cbInstr = 0;
14726 if (fSameCtx)
14727 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
14728 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14729 szInstr, sizeof(szInstr), &cbInstr);
14730 else
14731 {
14732 uint32_t fFlags = 0;
14733 switch (pVCpu->iem.s.enmCpuMode)
14734 {
14735 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
14736 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
14737 case IEMMODE_16BIT:
14738 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
14739 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
14740 else
14741 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
14742 break;
14743 }
14744 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
14745 szInstr, sizeof(szInstr), &cbInstr);
14746 }
14747
14748 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
14749 Log2(("****\n"
14750 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
14751 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
14752 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
14753 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
14754 " %s\n"
14755 ,
14756 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
14757 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
14758 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
14759 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
14760 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
14761 szInstr));
14762
14763 if (LogIs3Enabled())
14764 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14765 }
14766 else
14767# endif
14768 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
14769 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
14770 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
14771}
14772#endif
14773
14774
14775/**
14776 * Makes status code adjustments (pass-up from I/O and access handlers)
14777 * as well as maintaining statistics.
14778 *
14779 * @returns Strict VBox status code to pass up.
14780 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14781 * @param rcStrict The status from executing an instruction.
14782 */
14783DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14784{
14785 if (rcStrict != VINF_SUCCESS)
14786 {
14787 if (RT_SUCCESS(rcStrict))
14788 {
14789 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
14790 || rcStrict == VINF_IOM_R3_IOPORT_READ
14791 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
14792 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
14793 || rcStrict == VINF_IOM_R3_MMIO_READ
14794 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
14795 || rcStrict == VINF_IOM_R3_MMIO_WRITE
14796 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
14797 || rcStrict == VINF_CPUM_R3_MSR_READ
14798 || rcStrict == VINF_CPUM_R3_MSR_WRITE
14799 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
14800 || rcStrict == VINF_EM_RAW_TO_R3
14801 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
14802 || rcStrict == VINF_EM_TRIPLE_FAULT
14803 /* raw-mode / virt handlers only: */
14804 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
14805 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
14806 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
14807 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
14808 || rcStrict == VINF_SELM_SYNC_GDT
14809 || rcStrict == VINF_CSAM_PENDING_ACTION
14810 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
14811 /* nested hw.virt codes: */
14812 || rcStrict == VINF_SVM_VMEXIT
14813 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
14814/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
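 /* The pass-up status takes precedence when it is not an EM scheduling status, or when it ranks higher (lower VINF_EM_* value) than the current status. */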
14815 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
14816#ifdef VBOX_WITH_NESTED_HWVIRT
14817 if ( rcStrict == VINF_SVM_VMEXIT
14818 && rcPassUp == VINF_SUCCESS)
14819 rcStrict = VINF_SUCCESS;
14820 else
14821#endif
14822 if (rcPassUp == VINF_SUCCESS)
14823 pVCpu->iem.s.cRetInfStatuses++;
14824 else if ( rcPassUp < VINF_EM_FIRST
14825 || rcPassUp > VINF_EM_LAST
14826 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
14827 {
14828 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14829 pVCpu->iem.s.cRetPassUpStatus++;
14830 rcStrict = rcPassUp;
14831 }
14832 else
14833 {
14834 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14835 pVCpu->iem.s.cRetInfStatuses++;
14836 }
14837 }
14838 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
14839 pVCpu->iem.s.cRetAspectNotImplemented++;
14840 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14841 pVCpu->iem.s.cRetInstrNotImplemented++;
14842#ifdef IEM_VERIFICATION_MODE_FULL
14843 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
14844 rcStrict = VINF_SUCCESS;
14845#endif
14846 else
14847 pVCpu->iem.s.cRetErrStatuses++;
14848 }
14849 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
14850 {
14851 pVCpu->iem.s.cRetPassUpStatus++;
14852 rcStrict = pVCpu->iem.s.rcPassUp;
14853 }
14854
14855 return rcStrict;
14856}
14857
14858
14859/**
14860 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
14861 * IEMExecOneWithPrefetchedByPC.
14862 *
14863 * Similar code is found in IEMExecLots.
14864 *
14865 * @return Strict VBox status code.
14866 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14868 * @param fExecuteInhibit If set, execute the instruction following CLI,
14869 * POP SS and MOV SS,GR.
14870 */
14871DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
14872{
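 /* Decode and execute a single instruction; with IEM_WITH_SETJMP the opcode/memory fetchers longjmp back here on trouble instead of returning a status code. */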
14873#ifdef IEM_WITH_SETJMP
14874 VBOXSTRICTRC rcStrict;
14875 jmp_buf JmpBuf;
14876 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14877 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14878 if ((rcStrict = setjmp(JmpBuf)) == 0)
14879 {
14880 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14881 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14882 }
14883 else
14884 pVCpu->iem.s.cLongJumps++;
14885 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14886#else
14887 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14888 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14889#endif
14890 if (rcStrict == VINF_SUCCESS)
14891 pVCpu->iem.s.cInstructions++;
14892 if (pVCpu->iem.s.cActiveMappings > 0)
14893 {
14894 Assert(rcStrict != VINF_SUCCESS);
14895 iemMemRollback(pVCpu);
14896 }
14897//#ifdef DEBUG
14898// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14899//#endif
14900
14901 /* Execute the next instruction as well if a cli, pop ss or
14902 mov ss, Gr has just completed successfully. */
14903 if ( fExecuteInhibit
14904 && rcStrict == VINF_SUCCESS
14905 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14906 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
14907 {
14908 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14909 if (rcStrict == VINF_SUCCESS)
14910 {
14911#ifdef LOG_ENABLED
14912 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
14913#endif
14914#ifdef IEM_WITH_SETJMP
14915 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14916 if ((rcStrict = setjmp(JmpBuf)) == 0)
14917 {
14918 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14919 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14920 }
14921 else
14922 pVCpu->iem.s.cLongJumps++;
14923 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14924#else
14925 IEM_OPCODE_GET_NEXT_U8(&b);
14926 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14927#endif
14928 if (rcStrict == VINF_SUCCESS)
14929 pVCpu->iem.s.cInstructions++;
14930 if (pVCpu->iem.s.cActiveMappings > 0)
14931 {
14932 Assert(rcStrict != VINF_SUCCESS);
14933 iemMemRollback(pVCpu);
14934 }
14935 }
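 /* Disarm any interrupt inhibition by setting a PC that will never match. */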
14936 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14937 }
14938
14939 /*
14940 * Return value fiddling, statistics and sanity assertions.
14941 */
14942 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14943
14944 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14945 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14946#if defined(IEM_VERIFICATION_MODE_FULL)
14947 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14948 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14949 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14950 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14951#endif
14952 return rcStrict;
14953}
14954
14955
14956#ifdef IN_RC
14957/**
14958 * Re-enters raw-mode or ensures we return to ring-3.
14959 *
14960 * @returns rcStrict, maybe modified.
14961 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14962 * @param pCtx The current CPU context.
14963 * @param rcStrict The status code returned by the interpreter.
14964 */
14965DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
14966{
14967 if ( !pVCpu->iem.s.fInPatchCode
14968 && ( rcStrict == VINF_SUCCESS
14969 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14970 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14971 {
14972 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
14973 CPUMRawEnter(pVCpu);
14974 else
14975 {
14976 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
14977 rcStrict = VINF_EM_RESCHEDULE;
14978 }
14979 }
14980 return rcStrict;
14981}
14982#endif
14983
14984
14985/**
14986 * Execute one instruction.
14987 *
14988 * @return Strict VBox status code.
14989 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14990 */
14991VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14992{
14993#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14994 if (++pVCpu->iem.s.cVerifyDepth == 1)
14995 iemExecVerificationModeSetup(pVCpu);
14996#endif
14997#ifdef LOG_ENABLED
14998 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14999 iemLogCurInstr(pVCpu, pCtx, true);
15000#endif
15001
15002 /*
15003 * Do the decoding and emulation.
15004 */
15005 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15006 if (rcStrict == VINF_SUCCESS)
15007 rcStrict = iemExecOneInner(pVCpu, true);
15008
15009#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15010 /*
15011 * Assert some sanity.
15012 */
15013 if (pVCpu->iem.s.cVerifyDepth == 1)
15014 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
15015 pVCpu->iem.s.cVerifyDepth--;
15016#endif
15017#ifdef IN_RC
15018 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15019#endif
15020 if (rcStrict != VINF_SUCCESS)
15021 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15022 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15023 return rcStrict;
15024}
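
/*
 * Illustrative sketch only, kept out of the build with #if 0: a minimal way a
 * ring-3 caller could drive IEMExecOne in a short burst.  The function name
 * and the burst policy are assumptions, not actual EM code.
 */
#if 0
static VBOXSTRICTRC someR3SketchInterpretBurst(PVMCPU pVCpu, uint32_t cMaxInstructions)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    while (cMaxInstructions-- > 0)
    {
        rcStrict = IEMExecOne(pVCpu);       /* decode + execute one instruction */
        if (rcStrict != VINF_SUCCESS)       /* any scheduling or error status ends the burst */
            break;
    }
    return rcStrict;
}
#endif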
15025
15026
15027VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
15028{
15029 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15030 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15031
15032 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15033 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15034 if (rcStrict == VINF_SUCCESS)
15035 {
15036 rcStrict = iemExecOneInner(pVCpu, true);
15037 if (pcbWritten)
15038 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15039 }
15040
15041#ifdef IN_RC
15042 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15043#endif
15044 return rcStrict;
15045}
15046
15047
15048VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15049 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
15050{
15051 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15052 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15053
15054 VBOXSTRICTRC rcStrict;
15055 if ( cbOpcodeBytes
15056 && pCtx->rip == OpcodeBytesPC)
15057 {
15058 iemInitDecoder(pVCpu, false);
15059#ifdef IEM_WITH_CODE_TLB
15060 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15061 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15062 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15063 pVCpu->iem.s.offCurInstrStart = 0;
15064 pVCpu->iem.s.offInstrNextByte = 0;
15065#else
15066 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15067 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15068#endif
15069 rcStrict = VINF_SUCCESS;
15070 }
15071 else
15072 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15073 if (rcStrict == VINF_SUCCESS)
15074 {
15075 rcStrict = iemExecOneInner(pVCpu, true);
15076 }
15077
15078#ifdef IN_RC
15079 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15080#endif
15081 return rcStrict;
15082}
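
/*
 * Illustrative sketch only (#if 0, not built): how a caller that already holds
 * the opcode bytes at the current RIP might use the prefetched-by-PC variant.
 * The wrapper name and parameters are assumptions; pCtxCore must still be the
 * core of the vCPU's own context.
 */
#if 0
static VBOXSTRICTRC someR3SketchReplayInstr(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t GCPtrInstr,
                                            uint8_t const *pabInstr, size_t cbInstr)
{
    /* If GCPtrInstr no longer matches the guest RIP, IEMExecOneWithPrefetchedByPC
       falls back to a normal opcode prefetch, so stale bytes are never executed. */
    return IEMExecOneWithPrefetchedByPC(pVCpu, pCtxCore, GCPtrInstr, pabInstr, cbInstr);
}
#endif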
15083
15084
15085VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
15086{
15087 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15088 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15089
15090 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15091 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15092 if (rcStrict == VINF_SUCCESS)
15093 {
15094 rcStrict = iemExecOneInner(pVCpu, false);
15095 if (pcbWritten)
15096 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15097 }
15098
15099#ifdef IN_RC
15100 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15101#endif
15102 return rcStrict;
15103}
15104
15105
15106VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15107 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
15108{
15109 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15110 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15111
15112 VBOXSTRICTRC rcStrict;
15113 if ( cbOpcodeBytes
15114 && pCtx->rip == OpcodeBytesPC)
15115 {
15116 iemInitDecoder(pVCpu, true);
15117#ifdef IEM_WITH_CODE_TLB
15118 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15119 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15120 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15121 pVCpu->iem.s.offCurInstrStart = 0;
15122 pVCpu->iem.s.offInstrNextByte = 0;
15123#else
15124 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15125 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15126#endif
15127 rcStrict = VINF_SUCCESS;
15128 }
15129 else
15130 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15131 if (rcStrict == VINF_SUCCESS)
15132 rcStrict = iemExecOneInner(pVCpu, false);
15133
15134#ifdef IN_RC
15135 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15136#endif
15137 return rcStrict;
15138}
15139
15140
15141/**
15142 * May come in handy when debugging DISGetParamSize.
15143 *
15144 * @returns Strict VBox status code.
15145 * @param pVCpu The cross context virtual CPU structure of the
15146 * calling EMT.
15147 * @param pCtxCore The context core structure.
15148 * @param OpcodeBytesPC The PC of the opcode bytes.
15149 * @param pvOpcodeBytes Prefetched opcode bytes.
15150 * @param cbOpcodeBytes Number of prefetched bytes.
15151 * @param pcbWritten Where to return the number of bytes written.
15152 * Optional.
15153 */
15154VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15155 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
15156 uint32_t *pcbWritten)
15157{
15158 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15159 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15160
15161 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15162 VBOXSTRICTRC rcStrict;
15163 if ( cbOpcodeBytes
15164 && pCtx->rip == OpcodeBytesPC)
15165 {
15166 iemInitDecoder(pVCpu, true);
15167#ifdef IEM_WITH_CODE_TLB
15168 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15169 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15170 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15171 pVCpu->iem.s.offCurInstrStart = 0;
15172 pVCpu->iem.s.offInstrNextByte = 0;
15173#else
15174 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15175 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15176#endif
15177 rcStrict = VINF_SUCCESS;
15178 }
15179 else
15180 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15181 if (rcStrict == VINF_SUCCESS)
15182 {
15183 rcStrict = iemExecOneInner(pVCpu, false);
15184 if (pcbWritten)
15185 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15186 }
15187
15188#ifdef IN_RC
15189 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15190#endif
15191 return rcStrict;
15192}
15193
15194
15195VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
15196{
15197 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
15198
15199#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15200 /*
15201 * See if there is an interrupt pending in TRPM, inject it if we can.
15202 */
15203 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15204# ifdef IEM_VERIFICATION_MODE_FULL
15205 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15206# endif
15207
15208 /** @todo Maybe someday we can centralize this under CPUMCanInjectInterrupt()? */
15209#if defined(VBOX_WITH_NESTED_HWVIRT)
15210 bool fIntrEnabled = pCtx->hwvirt.svm.fGif;
15211 if (fIntrEnabled)
15212 {
15213 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
15214 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pCtx);
15215 else
15216 fIntrEnabled = pCtx->eflags.Bits.u1IF;
15217 }
15218#else
15219 bool fIntrEnabled = pCtx->eflags.Bits.u1IF;
15220#endif
15221 if ( fIntrEnabled
15222 && TRPMHasTrap(pVCpu)
15223 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15224 {
15225 uint8_t u8TrapNo;
15226 TRPMEVENT enmType;
15227 RTGCUINT uErrCode;
15228 RTGCPTR uCr2;
15229 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15230 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15231 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15232 TRPMResetTrap(pVCpu);
15233 }
15234
15235 /*
15236 * Log the state.
15237 */
15238# ifdef LOG_ENABLED
15239 iemLogCurInstr(pVCpu, pCtx, true);
15240# endif
15241
15242 /*
15243 * Do the decoding and emulation.
15244 */
15245 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15246 if (rcStrict == VINF_SUCCESS)
15247 rcStrict = iemExecOneInner(pVCpu, true);
15248
15249 /*
15250 * Assert some sanity.
15251 */
15252 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
15253
15254 /*
15255 * Log and return.
15256 */
15257 if (rcStrict != VINF_SUCCESS)
15258 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15259 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15260 if (pcInstructions)
15261 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15262 return rcStrict;
15263
15264#else /* Not verification mode */
15265
15266 /*
15267 * See if there is an interrupt pending in TRPM, inject it if we can.
15268 */
15269 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15270# ifdef IEM_VERIFICATION_MODE_FULL
15271 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15272# endif
15273
15274 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
15275#if defined(VBOX_WITH_NESTED_HWVIRT)
15276 bool fIntrEnabled = pCtx->hwvirt.svm.fGif;
15277 if (fIntrEnabled)
15278 {
15279 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
15280 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pCtx);
15281 else
15282 fIntrEnabled = pCtx->eflags.Bits.u1IF;
15283 }
15284#else
15285 bool fIntrEnabled = pCtx->eflags.Bits.u1IF;
15286#endif
15287 if ( fIntrEnabled
15288 && TRPMHasTrap(pVCpu)
15289 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15290 {
15291 uint8_t u8TrapNo;
15292 TRPMEVENT enmType;
15293 RTGCUINT uErrCode;
15294 RTGCPTR uCr2;
15295 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15296 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15297 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15298 TRPMResetTrap(pVCpu);
15299 }
15300
15301 /*
15302 * Initial decoder init w/ prefetch, then setup setjmp.
15303 */
15304 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15305 if (rcStrict == VINF_SUCCESS)
15306 {
15307# ifdef IEM_WITH_SETJMP
15308 jmp_buf JmpBuf;
15309 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
15310 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
15311 pVCpu->iem.s.cActiveMappings = 0;
15312 if ((rcStrict = setjmp(JmpBuf)) == 0)
15313# endif
15314 {
15315 /*
15316 * The run loop. We limit ourselves to 4096 instructions right now.
15317 */
15318 PVM pVM = pVCpu->CTX_SUFF(pVM);
15319 uint32_t cInstr = 4096;
15320 for (;;)
15321 {
15322 /*
15323 * Log the state.
15324 */
15325# ifdef LOG_ENABLED
15326 iemLogCurInstr(pVCpu, pCtx, true);
15327# endif
15328
15329 /*
15330 * Do the decoding and emulation.
15331 */
15332 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
15333 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
15334 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15335 {
15336 Assert(pVCpu->iem.s.cActiveMappings == 0);
15337 pVCpu->iem.s.cInstructions++;
15338 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
15339 {
15340 uint32_t fCpu = pVCpu->fLocalForcedActions
15341 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
15342 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
15343 | VMCPU_FF_TLB_FLUSH
15344# ifdef VBOX_WITH_RAW_MODE
15345 | VMCPU_FF_TRPM_SYNC_IDT
15346 | VMCPU_FF_SELM_SYNC_TSS
15347 | VMCPU_FF_SELM_SYNC_GDT
15348 | VMCPU_FF_SELM_SYNC_LDT
15349# endif
15350 | VMCPU_FF_INHIBIT_INTERRUPTS
15351 | VMCPU_FF_BLOCK_NMIS
15352 | VMCPU_FF_UNHALT ));
15353
15354 if (RT_LIKELY( ( !fCpu
15355 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
15356 && !pCtx->rflags.Bits.u1IF) )
15357 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
15358 {
15359 if (cInstr-- > 0)
15360 {
15361 Assert(pVCpu->iem.s.cActiveMappings == 0);
15362 iemReInitDecoder(pVCpu);
15363 continue;
15364 }
15365 }
15366 }
15367 Assert(pVCpu->iem.s.cActiveMappings == 0);
15368 }
15369 else if (pVCpu->iem.s.cActiveMappings > 0)
15370 iemMemRollback(pVCpu);
15371 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15372 break;
15373 }
15374 }
15375# ifdef IEM_WITH_SETJMP
15376 else
15377 {
15378 if (pVCpu->iem.s.cActiveMappings > 0)
15379 iemMemRollback(pVCpu);
15380 pVCpu->iem.s.cLongJumps++;
15381# ifdef VBOX_WITH_NESTED_HWVIRT
15382 /*
15383 * When a nested-guest causes an exception intercept when fetching memory
15384 * (e.g. IEM_MC_FETCH_MEM_U16) as part of instruction execution, we need this
15385 * to fix-up VINF_SVM_VMEXIT on the longjmp way out, otherwise we will guru.
15386 */
15387 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15388# endif
15389 }
15390 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
15391# endif
15392
15393 /*
15394 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
15395 */
15396 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
15397 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
15398# if defined(IEM_VERIFICATION_MODE_FULL)
15399 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
15400 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
15401 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
15402 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
15403# endif
15404 }
15405# ifdef VBOX_WITH_NESTED_HWVIRT
15406 else
15407 {
15408 /*
15409 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
15410 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
15411 */
15412 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15413 }
15414# endif
15415
15416 /*
15417 * Maybe re-enter raw-mode and log.
15418 */
15419# ifdef IN_RC
15420 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15421# endif
15422 if (rcStrict != VINF_SUCCESS)
15423 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15424 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15425 if (pcInstructions)
15426 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15427 return rcStrict;
15428#endif /* Not verification mode */
15429}
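
/*
 * Illustrative sketch only (#if 0, not built): a burst call as an outer ring-3
 * loop might issue it.  The wrapper name is an assumption.
 */
#if 0
static VBOXSTRICTRC someR3SketchRunLots(PVMCPU pVCpu)
{
    uint32_t     cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
    LogFlow(("someR3SketchRunLots: %u instructions, rcStrict=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif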
15430
15431
15432
15433/**
15434 * Injects a trap, fault, abort, software interrupt or external interrupt.
15435 *
15436 * The parameter list matches TRPMQueryTrapAll pretty closely.
15437 *
15438 * @returns Strict VBox status code.
15439 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15440 * @param u8TrapNo The trap number.
15441 * @param enmType What type is it (trap/fault/abort), software
15442 * interrupt or hardware interrupt.
15443 * @param uErrCode The error code if applicable.
15444 * @param uCr2 The CR2 value if applicable.
15445 * @param cbInstr The instruction length (only relevant for
15446 * software interrupts).
15447 */
15448VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
15449 uint8_t cbInstr)
15450{
15451 iemInitDecoder(pVCpu, false);
15452#ifdef DBGFTRACE_ENABLED
15453 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
15454 u8TrapNo, enmType, uErrCode, uCr2);
15455#endif
15456
15457 uint32_t fFlags;
15458 switch (enmType)
15459 {
15460 case TRPM_HARDWARE_INT:
15461 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
15462 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
15463 uErrCode = uCr2 = 0;
15464 break;
15465
15466 case TRPM_SOFTWARE_INT:
15467 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
15468 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
15469 uErrCode = uCr2 = 0;
15470 break;
15471
15472 case TRPM_TRAP:
15473 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
15474 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
15475 if (u8TrapNo == X86_XCPT_PF)
15476 fFlags |= IEM_XCPT_FLAGS_CR2;
15477 switch (u8TrapNo)
15478 {
15479 case X86_XCPT_DF:
15480 case X86_XCPT_TS:
15481 case X86_XCPT_NP:
15482 case X86_XCPT_SS:
15483 case X86_XCPT_PF:
15484 case X86_XCPT_AC:
15485 fFlags |= IEM_XCPT_FLAGS_ERR;
15486 break;
15487
15488 case X86_XCPT_NMI:
15489 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
15490 break;
15491 }
15492 break;
15493
15494 IEM_NOT_REACHED_DEFAULT_CASE_RET();
15495 }
15496
15497 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
15498}
15499
15500
15501/**
15502 * Injects the active TRPM event.
15503 *
15504 * @returns Strict VBox status code.
15505 * @param pVCpu The cross context virtual CPU structure.
15506 */
15507VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
15508{
15509#ifndef IEM_IMPLEMENTS_TASKSWITCH
15510 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
15511#else
15512 uint8_t u8TrapNo;
15513 TRPMEVENT enmType;
15514 RTGCUINT uErrCode;
15515 RTGCUINTPTR uCr2;
15516 uint8_t cbInstr;
15517 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
15518 if (RT_FAILURE(rc))
15519 return rc;
15520
15521 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
15522
15523 /** @todo Are there any other codes that imply the event was successfully
15524 * delivered to the guest? See @bugref{6607}. */
15525 if ( rcStrict == VINF_SUCCESS
15526 || rcStrict == VINF_IEM_RAISED_XCPT)
15527 {
15528 TRPMResetTrap(pVCpu);
15529 }
15530 return rcStrict;
15531#endif
15532}
15533
15534
15535VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
15536{
15537 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15538 return VERR_NOT_IMPLEMENTED;
15539}
15540
15541
15542VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
15543{
15544 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15545 return VERR_NOT_IMPLEMENTED;
15546}
15547
15548
15549#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
15550/**
15551 * Executes an IRET instruction with default operand size.
15552 *
15553 * This is for PATM.
15554 *
15555 * @returns VBox status code.
15556 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15557 * @param pCtxCore The register frame.
15558 */
15559VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
15560{
15561 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15562
15563 iemCtxCoreToCtx(pCtx, pCtxCore);
15564 iemInitDecoder(pVCpu);
15565 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
15566 if (rcStrict == VINF_SUCCESS)
15567 iemCtxToCtxCore(pCtxCore, pCtx);
15568 else
15569 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15570 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15571 return rcStrict;
15572}
15573#endif
15574
15575
15576/**
15577 * Macro used by the IEMExec* method to check the given instruction length.
15578 *
15579 * Will return on failure!
15580 *
15581 * @param a_cbInstr The given instruction length.
15582 * @param a_cbMin The minimum length.
15583 */
15584#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
15585 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
15586 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
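/* The single unsigned compare above folds both bounds into one check.  With
   a_cbMin = 1, a valid a_cbInstr in [1..15] maps to [0..14] and passes against
   15 - 1 = 14; a_cbInstr = 0 wraps around to a huge value and fails, and
   a_cbInstr = 16 maps to 15 and fails as well. */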
15587
15588
15589/**
15590 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
15591 *
15592 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
15593 *
15594 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
15595 * @param pVCpu The cross context virtual CPU structure of the calling thread.
15596 * @param rcStrict The status code to fiddle.
15597 */
15598DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15599{
15600 iemUninitExec(pVCpu);
15601#ifdef IN_RC
15602 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
15603 iemExecStatusCodeFiddling(pVCpu, rcStrict));
15604#else
15605 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15606#endif
15607}
15608
15609
15610/**
15611 * Interface for HM and EM for executing string I/O OUT (write) instructions.
15612 *
15613 * This API ASSUMES that the caller has already verified that the guest code is
15614 * allowed to access the I/O port. (The I/O port is in the DX register in the
15615 * guest state.)
15616 *
15617 * @returns Strict VBox status code.
15618 * @param pVCpu The cross context virtual CPU structure.
15619 * @param cbValue The size of the I/O port access (1, 2, or 4).
15620 * @param enmAddrMode The addressing mode.
15621 * @param fRepPrefix Indicates whether a repeat prefix is used
15622 * (doesn't matter which for this instruction).
15623 * @param cbInstr The instruction length in bytes.
15624 * @param iEffSeg The effective segment address.
15625 * @param fIoChecked Whether the access to the I/O port has been
15626 * checked or not. It's typically checked in the
15627 * HM scenario.
15628 */
15629VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15630 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
15631{
15632 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
15633 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15634
15635 /*
15636 * State init.
15637 */
15638 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15639
15640 /*
15641 * Switch orgy for getting to the right handler.
15642 */
15643 VBOXSTRICTRC rcStrict;
15644 if (fRepPrefix)
15645 {
15646 switch (enmAddrMode)
15647 {
15648 case IEMMODE_16BIT:
15649 switch (cbValue)
15650 {
15651 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15652 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15653 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15654 default:
15655 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15656 }
15657 break;
15658
15659 case IEMMODE_32BIT:
15660 switch (cbValue)
15661 {
15662 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15663 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15664 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15665 default:
15666 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15667 }
15668 break;
15669
15670 case IEMMODE_64BIT:
15671 switch (cbValue)
15672 {
15673 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15674 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15675 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15676 default:
15677 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15678 }
15679 break;
15680
15681 default:
15682 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15683 }
15684 }
15685 else
15686 {
15687 switch (enmAddrMode)
15688 {
15689 case IEMMODE_16BIT:
15690 switch (cbValue)
15691 {
15692 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15693 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15694 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15695 default:
15696 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15697 }
15698 break;
15699
15700 case IEMMODE_32BIT:
15701 switch (cbValue)
15702 {
15703 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15704 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15705 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15706 default:
15707 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15708 }
15709 break;
15710
15711 case IEMMODE_64BIT:
15712 switch (cbValue)
15713 {
15714 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15715 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15716 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15717 default:
15718 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15719 }
15720 break;
15721
15722 default:
15723 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15724 }
15725 }
15726
15727 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15728}
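
/*
 * Illustrative sketch only (#if 0, not built): how an HM I/O-exit handler might
 * hand a REP OUTSB off to this interface.  The handler name and the decoded
 * operand values are assumptions, not actual HM code.
 */
#if 0
static VBOXSTRICTRC hmR0SketchHandleRepOutsb(PVMCPU pVCpu, uint8_t cbInstr)
{
    /* Byte-sized access, 16-bit addressing, REP prefix, DS as the effective
       segment; the I/O port permission check was already done by the caller. */
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_16BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS, true /*fIoChecked*/);
}
#endif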
15729
15730
15731/**
15732 * Interface for HM and EM for executing string I/O IN (read) instructions.
15733 *
15734 * This API ASSUMES that the caller has already verified that the guest code is
15735 * allowed to access the I/O port. (The I/O port is in the DX register in the
15736 * guest state.)
15737 *
15738 * @returns Strict VBox status code.
15739 * @param pVCpu The cross context virtual CPU structure.
15740 * @param cbValue The size of the I/O port access (1, 2, or 4).
15741 * @param enmAddrMode The addressing mode.
15742 * @param fRepPrefix Indicates whether a repeat prefix is used
15743 * (doesn't matter which for this instruction).
15744 * @param cbInstr The instruction length in bytes.
15745 * @param fIoChecked Whether the access to the I/O port has been
15746 * checked or not. It's typically checked in the
15747 * HM scenario.
15748 */
15749VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15750 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15751{
15752 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15753
15754 /*
15755 * State init.
15756 */
15757 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15758
15759 /*
15760 * Switch orgy for getting to the right handler.
15761 */
15762 VBOXSTRICTRC rcStrict;
15763 if (fRepPrefix)
15764 {
15765 switch (enmAddrMode)
15766 {
15767 case IEMMODE_16BIT:
15768 switch (cbValue)
15769 {
15770 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15771 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15772 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15773 default:
15774 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15775 }
15776 break;
15777
15778 case IEMMODE_32BIT:
15779 switch (cbValue)
15780 {
15781 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15782 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15783 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15784 default:
15785 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15786 }
15787 break;
15788
15789 case IEMMODE_64BIT:
15790 switch (cbValue)
15791 {
15792 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15793 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15794 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15795 default:
15796 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15797 }
15798 break;
15799
15800 default:
15801 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15802 }
15803 }
15804 else
15805 {
15806 switch (enmAddrMode)
15807 {
15808 case IEMMODE_16BIT:
15809 switch (cbValue)
15810 {
15811 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15812 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15813 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15814 default:
15815 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15816 }
15817 break;
15818
15819 case IEMMODE_32BIT:
15820 switch (cbValue)
15821 {
15822 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15823 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15824 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15825 default:
15826 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15827 }
15828 break;
15829
15830 case IEMMODE_64BIT:
15831 switch (cbValue)
15832 {
15833 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15834 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15835 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15836 default:
15837 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15838 }
15839 break;
15840
15841 default:
15842 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15843 }
15844 }
15845
15846 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15847}
15848
15849
15850/**
15851 * Interface for raw-mode to execute an OUT instruction.
15852 *
15853 * @returns Strict VBox status code.
15854 * @param pVCpu The cross context virtual CPU structure.
15855 * @param cbInstr The instruction length in bytes.
15856 * @param u16Port The port to write to.
15857 * @param cbReg The register size.
15858 *
15859 * @remarks In ring-0 not all of the state needs to be synced in.
15860 */
15861VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15862{
15863 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15864 Assert(cbReg <= 4 && cbReg != 3);
15865
15866 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15867 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
15868 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15869}
15870
15871
15872/**
15873 * Interface for raw-mode to execute an IN instruction.
15874 *
15875 * @returns Strict VBox status code.
15876 * @param pVCpu The cross context virtual CPU structure.
15877 * @param cbInstr The instruction length in bytes.
15878 * @param u16Port The port to read.
15879 * @param cbReg The register size.
15880 */
15881VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15882{
15883 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15884 Assert(cbReg <= 4 && cbReg != 3);
15885
15886 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15887 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
15888 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15889}
15890
15891
15892/**
15893 * Interface for HM and EM to write to a CRx register.
15894 *
15895 * @returns Strict VBox status code.
15896 * @param pVCpu The cross context virtual CPU structure.
15897 * @param cbInstr The instruction length in bytes.
15898 * @param iCrReg The control register number (destination).
15899 * @param iGReg The general purpose register number (source).
15900 *
15901 * @remarks In ring-0 not all of the state needs to be synced in.
15902 */
15903VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15904{
15905 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15906 Assert(iCrReg < 16);
15907 Assert(iGReg < 16);
15908
15909 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15910 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15911 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15912}
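
/*
 * Illustrative sketch only (#if 0, not built): forwarding a decoded MOV CR3,reg
 * from an HM exit handler.  The handler name and operands are assumptions.
 */
#if 0
static VBOXSTRICTRC hmR0SketchHandleMovToCr3(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg)
{
    /* iCrReg = 3 (destination CR3), iGReg = decoded source GPR. */
    return IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, 3 /*iCrReg*/, iGReg);
}
#endif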
15913
15914
15915/**
15916 * Interface for HM and EM to read from a CRx register.
15917 *
15918 * @returns Strict VBox status code.
15919 * @param pVCpu The cross context virtual CPU structure.
15920 * @param cbInstr The instruction length in bytes.
15921 * @param iGReg The general purpose register number (destination).
15922 * @param iCrReg The control register number (source).
15923 *
15924 * @remarks In ring-0 not all of the state needs to be synced in.
15925 */
15926VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15927{
15928 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15929 Assert(iCrReg < 16);
15930 Assert(iGReg < 16);
15931
15932 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15933 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15934 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15935}
15936
15937
15938/**
15939 * Interface for HM and EM to clear the CR0[TS] bit.
15940 *
15941 * @returns Strict VBox status code.
15942 * @param pVCpu The cross context virtual CPU structure.
15943 * @param cbInstr The instruction length in bytes.
15944 *
15945 * @remarks In ring-0 not all of the state needs to be synced in.
15946 */
15947VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15948{
15949 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15950
15951 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15952 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15953 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15954}
15955
15956
15957/**
15958 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15959 *
15960 * @returns Strict VBox status code.
15961 * @param pVCpu The cross context virtual CPU structure.
15962 * @param cbInstr The instruction length in bytes.
15963 * @param uValue The value to load into CR0.
15964 *
15965 * @remarks In ring-0 not all of the state needs to be synced in.
15966 */
15967VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
15968{
15969 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15970
15971 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15972 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
15973 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15974}
15975
15976
15977/**
15978 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15979 *
15980 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15981 *
15982 * @returns Strict VBox status code.
15983 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15984 * @param cbInstr The instruction length in bytes.
15985 * @remarks In ring-0 not all of the state needs to be synced in.
15986 * @thread EMT(pVCpu)
15987 */
15988VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15989{
15990 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15991
15992 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15993 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15994 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15995}
15996
15997
15998/**
15999 * Interface for HM and EM to emulate the INVLPG instruction.
16000 *
16001 * @param pVCpu The cross context virtual CPU structure.
16002 * @param cbInstr The instruction length in bytes.
16003 * @param GCPtrPage The effective address of the page to invalidate.
16004 *
16005 * @remarks In ring-0 not all of the state needs to be synced in.
16006 */
16007VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
16008{
16009 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16010
16011 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16012 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
16013 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16014}
16015
16016
16017/**
16018 * Checks if IEM is in the process of delivering an event (interrupt or
16019 * exception).
16020 *
16021 * @returns true if we're in the process of raising an interrupt or exception,
16022 * false otherwise.
16023 * @param pVCpu The cross context virtual CPU structure.
16024 * @param puVector Where to store the vector associated with the
16025 * currently delivered event, optional.
16026 * @param pfFlags Where to store the event delivery flags (see
16027 * IEM_XCPT_FLAGS_XXX), optional.
16028 * @param puErr Where to store the error code associated with the
16029 * event, optional.
16030 * @param puCr2 Where to store the CR2 associated with the event,
16031 * optional.
16032 * @remarks The caller should check the flags to determine if the error code and
16033 * CR2 are valid for the event.
16034 */
16035VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
16036{
16037 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
16038 if (fRaisingXcpt)
16039 {
16040 if (puVector)
16041 *puVector = pVCpu->iem.s.uCurXcpt;
16042 if (pfFlags)
16043 *pfFlags = pVCpu->iem.s.fCurXcpt;
16044 if (puErr)
16045 *puErr = pVCpu->iem.s.uCurXcptErr;
16046 if (puCr2)
16047 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
16048 }
16049 return fRaisingXcpt;
16050}
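
/*
 * Illustrative sketch only (#if 0, not built): querying whether IEM is in the
 * middle of delivering an event, e.g. when deciding how to report a nested
 * fault.  The helper name is an assumption.
 */
#if 0
static void someSketchLogCurrentXcpt(PVMCPU pVCpu)
{
    uint8_t  uVector;
    uint32_t fFlags, uErr;
    uint64_t uCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("Delivering vector %#x fFlags=%#x uErr=%#x uCr2=%#RX64\n",
             uVector, fFlags, (fFlags & IEM_XCPT_FLAGS_ERR) ? uErr : 0, uCr2));
}
#endif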
16051
16052#ifdef VBOX_WITH_NESTED_HWVIRT
16053/**
16054 * Interface for HM and EM to emulate the CLGI instruction.
16055 *
16056 * @returns Strict VBox status code.
16057 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16058 * @param cbInstr The instruction length in bytes.
16059 * @thread EMT(pVCpu)
16060 */
16061VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
16062{
16063 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16064
16065 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16066 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
16067 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16068}
16069
16070
16071/**
16072 * Interface for HM and EM to emulate the STGI instruction.
16073 *
16074 * @returns Strict VBox status code.
16075 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16076 * @param cbInstr The instruction length in bytes.
16077 * @thread EMT(pVCpu)
16078 */
16079VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
16080{
16081 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16082
16083 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16084 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
16085 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16086}
16087
16088
16089/**
16090 * Interface for HM and EM to emulate the VMLOAD instruction.
16091 *
16092 * @returns Strict VBox status code.
16093 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16094 * @param cbInstr The instruction length in bytes.
16095 * @thread EMT(pVCpu)
16096 */
16097VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
16098{
16099 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16100
16101 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16102 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
16103 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16104}
16105
16106
16107/**
16108 * Interface for HM and EM to emulate the VMSAVE instruction.
16109 *
16110 * @returns Strict VBox status code.
16111 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16112 * @param cbInstr The instruction length in bytes.
16113 * @thread EMT(pVCpu)
16114 */
16115VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
16116{
16117 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16118
16119 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16120 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
16121 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16122}
16123
16124
16125/**
16126 * Interface for HM and EM to emulate the INVLPGA instruction.
16127 *
16128 * @returns Strict VBox status code.
16129 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16130 * @param cbInstr The instruction length in bytes.
16131 * @thread EMT(pVCpu)
16132 */
16133VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
16134{
16135 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16136
16137 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16138 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
16139 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16140}
16141
16142
16143/**
16144 * Interface for HM and EM to emulate the VMRUN instruction.
16145 *
16146 * @returns Strict VBox status code.
16147 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16148 * @param cbInstr The instruction length in bytes.
16149 * @thread EMT(pVCpu)
16150 */
16151VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
16152{
16153 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16154
16155 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16156 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
16157 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16158}
16159
16160
16161/**
16162 * Interface for HM and EM to emulate \#VMEXIT.
16163 *
16164 * @returns Strict VBox status code.
16165 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16166 * @param uExitCode The exit code.
16167 * @param uExitInfo1 The exit info. 1 field.
16168 * @param uExitInfo2 The exit info. 2 field.
16169 * @thread EMT(pVCpu)
16170 */
16171VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
16172{
16173 return iemSvmVmexit(pVCpu, IEM_GET_CTX(pVCpu), uExitCode, uExitInfo1, uExitInfo2);
16174}
16175#endif /* VBOX_WITH_NESTED_HWVIRT */
16176
16177#ifdef IN_RING3
16178
16179/**
16180 * Handles the unlikely and probably fatal merge cases.
16181 *
16182 * @returns Merged status code.
16183 * @param rcStrict Current EM status code.
16184 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16185 * with @a rcStrict.
16186 * @param iMemMap The memory mapping index. For error reporting only.
16187 * @param pVCpu The cross context virtual CPU structure of the calling
16188 * thread, for error reporting only.
16189 */
16190DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
16191 unsigned iMemMap, PVMCPU pVCpu)
16192{
16193 if (RT_FAILURE_NP(rcStrict))
16194 return rcStrict;
16195
16196 if (RT_FAILURE_NP(rcStrictCommit))
16197 return rcStrictCommit;
16198
16199 if (rcStrict == rcStrictCommit)
16200 return rcStrictCommit;
16201
16202 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
16203 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
16204 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
16205 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
16206 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
16207 return VERR_IOM_FF_STATUS_IPE;
16208}
16209
16210
16211/**
16212 * Helper for IOMR3ProcessForceFlag.
16213 *
16214 * @returns Merged status code.
16215 * @param rcStrict Current EM status code.
16216 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16217 * with @a rcStrict.
16218 * @param iMemMap The memory mapping index. For error reporting only.
16219 * @param pVCpu The cross context virtual CPU structure of the calling
16220 * thread, for error reporting only.
16221 */
16222DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
16223{
16224 /* Simple. */
16225 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
16226 return rcStrictCommit;
16227
16228 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
16229 return rcStrict;
16230
16231 /* EM scheduling status codes. */
16232 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
16233 && rcStrict <= VINF_EM_LAST))
16234 {
16235 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
16236 && rcStrictCommit <= VINF_EM_LAST))
16237 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
16238 }
16239
16240 /* Unlikely */
16241 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
16242}
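
/* Merge behaviour in brief: if the EM status is VINF_SUCCESS or VINF_EM_RAW_TO_R3
   the commit status wins; if the commit status is VINF_SUCCESS the EM status wins;
   two EM scheduling codes yield the numerically smaller (higher priority) one;
   anything else is handed to iemR3MergeStatusSlow above. */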
16243
16244
16245/**
16246 * Called by force-flag handling code when VMCPU_FF_IEM is set.
16247 *
16248 * @returns Merge between @a rcStrict and what the commit operation returned.
16249 * @param pVM The cross context VM structure.
16250 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16251 * @param rcStrict The status code returned by ring-0 or raw-mode.
16252 */
16253VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
16254{
16255 /*
16256 * Reset the pending commit.
16257 */
16258 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
16259 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
16260 ("%#x %#x %#x\n",
16261 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16262 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
16263
16264 /*
16265 * Commit the pending bounce buffers (usually just one).
16266 */
16267 unsigned cBufs = 0;
16268 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
16269 while (iMemMap-- > 0)
16270 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
16271 {
16272 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
16273 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
16274 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
16275
16276 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
16277 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
16278 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
16279
16280 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
16281 {
16282 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
16283 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16284 pbBuf,
16285 cbFirst,
16286 PGMACCESSORIGIN_IEM);
16287 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16288 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16289 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16290 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16291 }
16292
16293 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16294 {
16295 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16296 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16297 pbBuf + cbFirst,
16298 cbSecond,
16299 PGMACCESSORIGIN_IEM);
16300 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16301 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16302 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16303 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16304 }
16305 cBufs++;
16306 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16307 }
16308
16309 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16310 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16311 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16312 pVCpu->iem.s.cActiveMappings = 0;
16313 return rcStrict;
16314}
16315
16316#endif /* IN_RING3 */
16317