VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@70612

Last change on this file since 70612 was 70612, checked in by vboxsync, 7 years ago

VMM: Expose PCID, INVPCID, FSGSBASE features to guests. Implemented the relevant instructions in IEM.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 638.8 KB
1/* $Id: IEMAll.cpp 70612 2018-01-17 18:12:23Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered, however this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
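/*
 * Hypothetical usage sketch of the level convention above (the instruction
 * names, format strings and values are made up; only the Log/LogFlow/LogN
 * macros and the LOG_GROUP_IEM group come from the real code):
 *
 *   LogFlow(("IEMExecOne: enter\n"));                          // Flow   : enter/exit state info.
 *   Log(("iemRaiseXcptOrInt: vector=%#x\n", 0x0e));            // Level 1: exceptions and major events.
 *   Log4(("decode - %04x:%08x: add eax, ebx\n", 0x8, 0x1000)); // Level 4: decoded mnemonics w/ EIP.
 *   Log8(("IEM WR %RGv LB %#x\n", (RTGCPTR)0x7000, 4));        // Level 8: memory writes.
 */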
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#ifdef VBOX_WITH_NESTED_HWVIRT
105# include <VBox/vmm/em.h>
106# include <VBox/vmm/hm_svm.h>
107#endif
108#include <VBox/vmm/tm.h>
109#include <VBox/vmm/dbgf.h>
110#include <VBox/vmm/dbgftrace.h>
111#ifdef VBOX_WITH_RAW_MODE_NOT_R0
112# include <VBox/vmm/patm.h>
113# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
114# include <VBox/vmm/csam.h>
115# endif
116#endif
117#include "IEMInternal.h"
118#ifdef IEM_VERIFICATION_MODE_FULL
119# include <VBox/vmm/rem.h>
120# include <VBox/vmm/mm.h>
121#endif
122#include <VBox/vmm/vm.h>
123#include <VBox/log.h>
124#include <VBox/err.h>
125#include <VBox/param.h>
126#include <VBox/dis.h>
127#include <VBox/disopcode.h>
128#include <iprt/assert.h>
129#include <iprt/string.h>
130#include <iprt/x86.h>
131
132
133/*********************************************************************************************************************************
134* Structures and Typedefs *
135*********************************************************************************************************************************/
136/** @typedef PFNIEMOP
137 * Pointer to an opcode decoder function.
138 */
139
140/** @def FNIEMOP_DEF
141 * Define an opcode decoder function.
142 *
143 * We're using macros for this so that adding and removing parameters as well as
144 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
145 *
146 * @param a_Name The function name.
147 */
148
149/** @typedef PFNIEMOPRM
150 * Pointer to an opcode decoder function with RM byte.
151 */
152
153/** @def FNIEMOPRM_DEF
154 * Define an opcode decoder function with RM byte.
155 *
156 * We're using macros for this so that adding and removing parameters as well as
157 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL_1
158 *
159 * @param a_Name The function name.
160 */
161
162#if defined(__GNUC__) && defined(RT_ARCH_X86)
163typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
164typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
165# define FNIEMOP_DEF(a_Name) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
167# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
168 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
169# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
170 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
171
172#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
173typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
174typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
175# define FNIEMOP_DEF(a_Name) \
176 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
177# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
178 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
179# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
180 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
181
182#elif defined(__GNUC__)
183typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
184typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
185# define FNIEMOP_DEF(a_Name) \
186 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
187# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
188 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
189# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
190 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
191
192#else
193typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
194typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
195# define FNIEMOP_DEF(a_Name) \
196 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
197# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
198 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
199# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
200 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
201
202#endif
203#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
204
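/*
 * Hypothetical sketch of how these macros are meant to be used; the handler
 * name iemOp_Example_Eb_Gb is invented for illustration (real handlers live in
 * the IEMAllInstructions*.cpp.h files).
 */
#if 0 /* example only */
FNIEMOPRM_DEF(iemOp_Example_Eb_Gb)
{
    NOREF(pVCpu); NOREF(bRm);   /* The actual decoding/execution work would go here. */
    return VINF_SUCCESS;
}

/* At the dispatch site, after fetching the ModR/M byte:
       return FNIEMOP_CALL_1(iemOp_Example_Eb_Gb, bRm);   // expands to iemOp_Example_Eb_Gb(pVCpu, bRm)   */
#endif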
205
206/**
207 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
208 */
209typedef union IEMSELDESC
210{
211 /** The legacy view. */
212 X86DESC Legacy;
213 /** The long mode view. */
214 X86DESC64 Long;
215} IEMSELDESC;
216/** Pointer to a selector descriptor table entry. */
217typedef IEMSELDESC *PIEMSELDESC;
218
219/**
220 * CPU exception classes.
221 */
222typedef enum IEMXCPTCLASS
223{
224 IEMXCPTCLASS_BENIGN,
225 IEMXCPTCLASS_CONTRIBUTORY,
226 IEMXCPTCLASS_PAGE_FAULT,
227 IEMXCPTCLASS_DOUBLE_FAULT
228} IEMXCPTCLASS;
229
230
231/*********************************************************************************************************************************
232* Defined Constants And Macros *
233*********************************************************************************************************************************/
234/** @def IEM_WITH_SETJMP
235 * Enables alternative status code handling using setjmps.
236 *
237 * This adds a bit of expense via the setjmp() call since it saves all the
238 * non-volatile registers. However, it eliminates return code checks and allows
239 * for more optimal return value passing (return regs instead of stack buffer).
240 */
241#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
242# define IEM_WITH_SETJMP
243#endif
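/*
 * Informal sketch of the difference this makes for a memory fetch (the exact
 * helper names are illustrative):
 *
 *   Without IEM_WITH_SETJMP every helper returns a strict status that the
 *   caller must check and propagate:
 *      uint32_t u32; VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &u32, iSegReg, GCPtrMem);
 *      if (rcStrict != VINF_SUCCESS) return rcStrict;
 *
 *   With IEM_WITH_SETJMP a failure longjmps back to the top-level driver, so
 *   the helper can return the value directly:
 *      uint32_t u32 = iemMemFetchDataU32Jmp(pVCpu, iSegReg, GCPtrMem);
 */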
244
245/** Temporary hack to disable the double execution. Will be removed in favor
246 * of a dedicated execution mode in EM. */
247//#define IEM_VERIFICATION_MODE_NO_REM
248
249/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
250 * due to GCC lacking knowledge about the value range of a switch. */
251#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
252
253/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
254#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
255
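/*
 * Typical usage sketch for the two macros above, in an effective operand size
 * switch inside an instruction implementation (fragment only):
 *
 *   switch (pVCpu->iem.s.enmEffOpSize)
 *   {
 *       case IEMMODE_16BIT: ... break;
 *       case IEMMODE_32BIT: ... break;
 *       case IEMMODE_64BIT: ... break;
 *       IEM_NOT_REACHED_DEFAULT_CASE_RET();  // keeps GCC from warning about uninitialized variables
 *   }
 */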
256/**
257 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
258 * occasion.
259 */
260#ifdef LOG_ENABLED
261# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
262 do { \
263 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
264 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
265 } while (0)
266#else
267# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
268 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
269#endif
270
271/**
272 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
273 * occasion using the supplied logger statement.
274 *
275 * @param a_LoggerArgs What to log on failure.
276 */
277#ifdef LOG_ENABLED
278# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
279 do { \
280 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
281 /*LogFunc(a_LoggerArgs);*/ \
282 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
283 } while (0)
284#else
285# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
286 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
287#endif
288
289/**
290 * Call an opcode decoder function.
291 *
292 * We're using macros for this so that adding and removing parameters can be
293 * done as we please. See FNIEMOP_DEF.
294 */
295#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
296
297/**
298 * Call a common opcode decoder function taking one extra argument.
299 *
300 * We're using macros for this so that adding and removing parameters can be
301 * done as we please. See FNIEMOP_DEF_1.
302 */
303#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
304
305/**
306 * Call a common opcode decoder function taking two extra arguments.
307 *
308 * We're using macros for this so that adding and removing parameters can be
309 * done as we please. See FNIEMOP_DEF_2.
310 */
311#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
312
313/**
314 * Check if we're currently executing in real or virtual 8086 mode.
315 *
316 * @returns @c true if it is, @c false if not.
317 * @param a_pVCpu The IEM state of the current CPU.
318 */
319#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
320
321/**
322 * Check if we're currently executing in virtual 8086 mode.
323 *
324 * @returns @c true if it is, @c false if not.
325 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
326 */
327#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
328
329/**
330 * Check if we're currently executing in long mode.
331 *
332 * @returns @c true if it is, @c false if not.
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
336
337/**
338 * Check if we're currently executing in real mode.
339 *
340 * @returns @c true if it is, @c false if not.
341 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
342 */
343#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
344
345/**
346 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
347 * @returns PCCPUMFEATURES
348 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
349 */
350#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
351
352/**
353 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
354 * @returns PCCPUMFEATURES
355 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
356 */
357#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
358
359/**
360 * Evaluates to true if we're presenting an Intel CPU to the guest.
361 */
362#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
363
364/**
365 * Evaluates to true if we're presenting an AMD CPU to the guest.
366 */
367#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
368
369/**
370 * Check if the address is canonical.
371 */
372#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
373
374/**
375 * Gets the effective VEX.VVVV value.
376 *
377 * The 4th bit is ignored when not in 64-bit code.
378 * @returns effective V-register value.
379 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
380 */
381#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
382 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
383
384/** @def IEM_USE_UNALIGNED_DATA_ACCESS
385 * Use unaligned accesses instead of elaborate byte assembly. */
386#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
387# define IEM_USE_UNALIGNED_DATA_ACCESS
388#endif
389
390#ifdef VBOX_WITH_NESTED_HWVIRT
391/**
392 * Check the common SVM instruction preconditions.
393 */
394# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
395 do { \
396 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
397 { \
398 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
399 return iemRaiseUndefinedOpcode(pVCpu); \
400 } \
401 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
402 { \
403 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
404 return iemRaiseUndefinedOpcode(pVCpu); \
405 } \
406 if (pVCpu->iem.s.uCpl != 0) \
407 { \
408 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
409 return iemRaiseGeneralProtectionFault0(pVCpu); \
410 } \
411 } while (0)
412
413/**
414 * Check if SVM is enabled.
415 */
416# define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
417
418/**
419 * Check if an SVM control/instruction intercept is set.
420 */
421# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
422
423/**
424 * Check if an SVM read CRx intercept is set.
425 */
426# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
427
428/**
429 * Check if an SVM write CRx intercept is set.
430 */
431# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
432
433/**
434 * Check if an SVM read DRx intercept is set.
435 */
436# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
437
438/**
439 * Check if an SVM write DRx intercept is set.
440 */
441# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
442
443/**
444 * Check if an SVM exception intercept is set.
445 */
446# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
447
448/**
449 * Invokes the SVM \#VMEXIT handler for the nested-guest.
450 */
451# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
452 do \
453 { \
454 return iemSvmVmexit((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); \
455 } while (0)
456
457/**
458 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
459 * corresponding decode assist information.
460 */
461# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
462 do \
463 { \
464 uint64_t uExitInfo1; \
465 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
466 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
467 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
468 else \
469 uExitInfo1 = 0; \
470 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
471 } while (0)
472
473#else
474# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) do { } while (0)
475# define IEM_IS_SVM_ENABLED(a_pVCpu) (false)
476# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
477# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
478# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
479# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
480# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
481# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
482# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
483# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
484
485#endif /* VBOX_WITH_NESTED_HWVIRT */
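/*
 * Hypothetical fragment showing the intended usage pattern of the SVM helper
 * macros above at the start of an SVM instruction implementation (the
 * intercept and exit code names are assumed to come from hm_svm.h; VMMCALL is
 * just an example):
 *
 *   IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmmcall);
 *   if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMMCALL))
 *       IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_VMMCALL, 0, 0);  // no exit info for VMMCALL
 */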
486
487
488/*********************************************************************************************************************************
489* Global Variables *
490*********************************************************************************************************************************/
491extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
492
493
494/** Function table for the ADD instruction. */
495IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
496{
497 iemAImpl_add_u8, iemAImpl_add_u8_locked,
498 iemAImpl_add_u16, iemAImpl_add_u16_locked,
499 iemAImpl_add_u32, iemAImpl_add_u32_locked,
500 iemAImpl_add_u64, iemAImpl_add_u64_locked
501};
502
503/** Function table for the ADC instruction. */
504IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
505{
506 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
507 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
508 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
509 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
510};
511
512/** Function table for the SUB instruction. */
513IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
514{
515 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
516 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
517 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
518 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
519};
520
521/** Function table for the SBB instruction. */
522IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
523{
524 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
525 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
526 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
527 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
528};
529
530/** Function table for the OR instruction. */
531IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
532{
533 iemAImpl_or_u8, iemAImpl_or_u8_locked,
534 iemAImpl_or_u16, iemAImpl_or_u16_locked,
535 iemAImpl_or_u32, iemAImpl_or_u32_locked,
536 iemAImpl_or_u64, iemAImpl_or_u64_locked
537};
538
539/** Function table for the XOR instruction. */
540IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
541{
542 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
543 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
544 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
545 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
546};
547
548/** Function table for the AND instruction. */
549IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
550{
551 iemAImpl_and_u8, iemAImpl_and_u8_locked,
552 iemAImpl_and_u16, iemAImpl_and_u16_locked,
553 iemAImpl_and_u32, iemAImpl_and_u32_locked,
554 iemAImpl_and_u64, iemAImpl_and_u64_locked
555};
556
557/** Function table for the CMP instruction.
558 * @remarks Making operand order ASSUMPTIONS.
559 */
560IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
561{
562 iemAImpl_cmp_u8, NULL,
563 iemAImpl_cmp_u16, NULL,
564 iemAImpl_cmp_u32, NULL,
565 iemAImpl_cmp_u64, NULL
566};
567
568/** Function table for the TEST instruction.
569 * @remarks Making operand order ASSUMPTIONS.
570 */
571IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
572{
573 iemAImpl_test_u8, NULL,
574 iemAImpl_test_u16, NULL,
575 iemAImpl_test_u32, NULL,
576 iemAImpl_test_u64, NULL
577};
578
579/** Function table for the BT instruction. */
580IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
581{
582 NULL, NULL,
583 iemAImpl_bt_u16, NULL,
584 iemAImpl_bt_u32, NULL,
585 iemAImpl_bt_u64, NULL
586};
587
588/** Function table for the BTC instruction. */
589IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
590{
591 NULL, NULL,
592 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
593 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
594 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
595};
596
597/** Function table for the BTR instruction. */
598IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
599{
600 NULL, NULL,
601 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
602 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
603 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
604};
605
606/** Function table for the BTS instruction. */
607IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
608{
609 NULL, NULL,
610 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
611 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
612 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
613};
614
615/** Function table for the BSF instruction. */
616IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
617{
618 NULL, NULL,
619 iemAImpl_bsf_u16, NULL,
620 iemAImpl_bsf_u32, NULL,
621 iemAImpl_bsf_u64, NULL
622};
623
624/** Function table for the BSR instruction. */
625IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
626{
627 NULL, NULL,
628 iemAImpl_bsr_u16, NULL,
629 iemAImpl_bsr_u32, NULL,
630 iemAImpl_bsr_u64, NULL
631};
632
633/** Function table for the IMUL instruction. */
634IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
635{
636 NULL, NULL,
637 iemAImpl_imul_two_u16, NULL,
638 iemAImpl_imul_two_u32, NULL,
639 iemAImpl_imul_two_u64, NULL
640};
641
642/** Group 1 /r lookup table. */
643IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
644{
645 &g_iemAImpl_add,
646 &g_iemAImpl_or,
647 &g_iemAImpl_adc,
648 &g_iemAImpl_sbb,
649 &g_iemAImpl_and,
650 &g_iemAImpl_sub,
651 &g_iemAImpl_xor,
652 &g_iemAImpl_cmp
653};
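/*
 * Hypothetical sketch of how the group 1 table is consumed: the 0x80..0x83
 * opcode handlers pick the worker by the ModR/M reg field (/r). The bRm value
 * and the pfnNormalU32 member name are for illustration only.
 */
#if 0 /* example only */
uint8_t const   bRm   = 0xc1;   /* mod=3, reg=0 (/0 = ADD), rm=1 */
PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
/* pImpl now points to g_iemAImpl_add; pImpl->pfnNormalU32 and friends do the actual work. */
#endif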
654
655/** Function table for the INC instruction. */
656IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
657{
658 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
659 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
660 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
661 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
662};
663
664/** Function table for the DEC instruction. */
665IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
666{
667 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
668 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
669 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
670 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
671};
672
673/** Function table for the NEG instruction. */
674IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
675{
676 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
677 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
678 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
679 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
680};
681
682/** Function table for the NOT instruction. */
683IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
684{
685 iemAImpl_not_u8, iemAImpl_not_u8_locked,
686 iemAImpl_not_u16, iemAImpl_not_u16_locked,
687 iemAImpl_not_u32, iemAImpl_not_u32_locked,
688 iemAImpl_not_u64, iemAImpl_not_u64_locked
689};
690
691
692/** Function table for the ROL instruction. */
693IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
694{
695 iemAImpl_rol_u8,
696 iemAImpl_rol_u16,
697 iemAImpl_rol_u32,
698 iemAImpl_rol_u64
699};
700
701/** Function table for the ROR instruction. */
702IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
703{
704 iemAImpl_ror_u8,
705 iemAImpl_ror_u16,
706 iemAImpl_ror_u32,
707 iemAImpl_ror_u64
708};
709
710/** Function table for the RCL instruction. */
711IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
712{
713 iemAImpl_rcl_u8,
714 iemAImpl_rcl_u16,
715 iemAImpl_rcl_u32,
716 iemAImpl_rcl_u64
717};
718
719/** Function table for the RCR instruction. */
720IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
721{
722 iemAImpl_rcr_u8,
723 iemAImpl_rcr_u16,
724 iemAImpl_rcr_u32,
725 iemAImpl_rcr_u64
726};
727
728/** Function table for the SHL instruction. */
729IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
730{
731 iemAImpl_shl_u8,
732 iemAImpl_shl_u16,
733 iemAImpl_shl_u32,
734 iemAImpl_shl_u64
735};
736
737/** Function table for the SHR instruction. */
738IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
739{
740 iemAImpl_shr_u8,
741 iemAImpl_shr_u16,
742 iemAImpl_shr_u32,
743 iemAImpl_shr_u64
744};
745
746/** Function table for the SAR instruction. */
747IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
748{
749 iemAImpl_sar_u8,
750 iemAImpl_sar_u16,
751 iemAImpl_sar_u32,
752 iemAImpl_sar_u64
753};
754
755
756/** Function table for the MUL instruction. */
757IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
758{
759 iemAImpl_mul_u8,
760 iemAImpl_mul_u16,
761 iemAImpl_mul_u32,
762 iemAImpl_mul_u64
763};
764
765/** Function table for the IMUL instruction working implicitly on rAX. */
766IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
767{
768 iemAImpl_imul_u8,
769 iemAImpl_imul_u16,
770 iemAImpl_imul_u32,
771 iemAImpl_imul_u64
772};
773
774/** Function table for the DIV instruction. */
775IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
776{
777 iemAImpl_div_u8,
778 iemAImpl_div_u16,
779 iemAImpl_div_u32,
780 iemAImpl_div_u64
781};
782
783/** Function table for the IDIV instruction. */
784IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
785{
786 iemAImpl_idiv_u8,
787 iemAImpl_idiv_u16,
788 iemAImpl_idiv_u32,
789 iemAImpl_idiv_u64
790};
791
792/** Function table for the SHLD instruction */
793IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
794{
795 iemAImpl_shld_u16,
796 iemAImpl_shld_u32,
797 iemAImpl_shld_u64,
798};
799
800/** Function table for the SHRD instruction */
801IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
802{
803 iemAImpl_shrd_u16,
804 iemAImpl_shrd_u32,
805 iemAImpl_shrd_u64,
806};
807
808
809/** Function table for the PUNPCKLBW instruction */
810IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
811/** Function table for the PUNPCKLWD instruction */
812IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
813/** Function table for the PUNPCKLDQ instruction */
814IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
815/** Function table for the PUNPCKLQDQ instruction */
816IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
817
818/** Function table for the PUNPCKHBW instruction */
819IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
820/** Function table for the PUNPCKHWD instruction */
821IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
822/** Function table for the PUNPCKHDQ instruction */
823IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
824/** Function table for the PUNPCKHQDQ instruction */
825IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
826
827/** Function table for the PXOR instruction */
828IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
829/** Function table for the PCMPEQB instruction */
830IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
831/** Function table for the PCMPEQW instruction */
832IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
833/** Function table for the PCMPEQD instruction */
834IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
835
836
837#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
838/** What IEM just wrote. */
839uint8_t g_abIemWrote[256];
840/** How much IEM just wrote. */
841size_t g_cbIemWrote;
842#endif
843
844
845/*********************************************************************************************************************************
846* Internal Functions *
847*********************************************************************************************************************************/
848IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
849IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
850IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
851IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
852/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
853IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
854IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
855IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
856IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
857IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
858IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
859IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
860IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
861IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
862IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
863IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
864IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
865#ifdef IEM_WITH_SETJMP
866DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
867DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
868DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
869DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
870DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
871#endif
872
873IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
874IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
875IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
876IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
877IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
878IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
879IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
880IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
881IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
882IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
883IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
884IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
885IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
886IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
887IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
888IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
889IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
890
891#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
892IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
893#endif
894IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
895IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
896
897#ifdef VBOX_WITH_NESTED_HWVIRT
898IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uExitCode, uint64_t uExitInfo1,
899 uint64_t uExitInfo2);
900IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t u8Vector, uint32_t fFlags,
901 uint32_t uErr, uint64_t uCr2);
902#endif
903
904/**
905 * Sets the pass up status.
906 *
907 * @returns VINF_SUCCESS.
908 * @param pVCpu The cross context virtual CPU structure of the
909 * calling thread.
910 * @param rcPassUp The pass up status. Must be informational.
911 * VINF_SUCCESS is not allowed.
912 */
913IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
914{
915 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
916
917 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
918 if (rcOldPassUp == VINF_SUCCESS)
919 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
920 /* If both are EM scheduling codes, use EM priority rules. */
921 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
922 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
923 {
924 if (rcPassUp < rcOldPassUp)
925 {
926 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
927 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
928 }
929 else
930 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
931 }
932 /* Override EM scheduling with specific status code. */
933 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
934 {
935 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
936 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
937 }
938 /* Don't override specific status code, first come first served. */
939 else
940 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
941 return VINF_SUCCESS;
942}
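/*
 * Hypothetical caller-side sketch: when a PGM/IOM access returns an
 * informational status that must not be lost, it is stashed here and the
 * caller carries on with VINF_SUCCESS:
 *
 *   if (rcStrict != VINF_SUCCESS && PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *       rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
 */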
943
944
945/**
946 * Calculates the CPU mode.
947 *
948 * This is mainly for updating IEMCPU::enmCpuMode.
949 *
950 * @returns CPU mode.
951 * @param pCtx The register context for the CPU.
952 */
953DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
954{
955 if (CPUMIsGuestIn64BitCodeEx(pCtx))
956 return IEMMODE_64BIT;
957 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
958 return IEMMODE_32BIT;
959 return IEMMODE_16BIT;
960}
961
962
963/**
964 * Initializes the execution state.
965 *
966 * @param pVCpu The cross context virtual CPU structure of the
967 * calling thread.
968 * @param fBypassHandlers Whether to bypass access handlers.
969 *
970 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
971 * side-effects in strict builds.
972 */
973DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
974{
975 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
976
977 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
978
979#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
980 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
981 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
982 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
983 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
984 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
985 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
986 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
987 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
988#endif
989
990#ifdef VBOX_WITH_RAW_MODE_NOT_R0
991 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
992#endif
993 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
994 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
995#ifdef VBOX_STRICT
996 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
997 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
998 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
999 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1000 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1001 pVCpu->iem.s.uRexReg = 127;
1002 pVCpu->iem.s.uRexB = 127;
1003 pVCpu->iem.s.uRexIndex = 127;
1004 pVCpu->iem.s.iEffSeg = 127;
1005 pVCpu->iem.s.idxPrefix = 127;
1006 pVCpu->iem.s.uVex3rdReg = 127;
1007 pVCpu->iem.s.uVexLength = 127;
1008 pVCpu->iem.s.fEvexStuff = 127;
1009 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1010# ifdef IEM_WITH_CODE_TLB
1011 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1012 pVCpu->iem.s.pbInstrBuf = NULL;
1013 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1014 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1015 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1016 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1017# else
1018 pVCpu->iem.s.offOpcode = 127;
1019 pVCpu->iem.s.cbOpcode = 127;
1020# endif
1021#endif
1022
1023 pVCpu->iem.s.cActiveMappings = 0;
1024 pVCpu->iem.s.iNextMapping = 0;
1025 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1026 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1027#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1028 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1029 && pCtx->cs.u64Base == 0
1030 && pCtx->cs.u32Limit == UINT32_MAX
1031 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1032 if (!pVCpu->iem.s.fInPatchCode)
1033 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1034#endif
1035
1036#ifdef IEM_VERIFICATION_MODE_FULL
1037 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
1038 pVCpu->iem.s.fNoRem = true;
1039#endif
1040}
1041
1042#ifdef VBOX_WITH_NESTED_HWVIRT
1043/**
1044 * Performs a minimal reinitialization of the execution state.
1045 *
1046 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1047 * 'world-switch' type operations on the CPU. Currently only nested
1048 * hardware-virtualization uses it.
1049 *
1050 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1051 */
1052IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1053{
1054 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1055 IEMMODE const enmMode = iemCalcCpuMode(pCtx);
1056 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1057
1058 pVCpu->iem.s.uCpl = uCpl;
1059 pVCpu->iem.s.enmCpuMode = enmMode;
1060 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1061 pVCpu->iem.s.enmEffAddrMode = enmMode;
1062 if (enmMode != IEMMODE_64BIT)
1063 {
1064 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1065 pVCpu->iem.s.enmEffOpSize = enmMode;
1066 }
1067 else
1068 {
1069 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1070 pVCpu->iem.s.enmEffOpSize = enmMode;
1071 }
1072 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1073#ifndef IEM_WITH_CODE_TLB
1074 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1075 pVCpu->iem.s.offOpcode = 0;
1076 pVCpu->iem.s.cbOpcode = 0;
1077#endif
1078}
1079#endif
1080
1081/**
1082 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1083 *
1084 * @param pVCpu The cross context virtual CPU structure of the
1085 * calling thread.
1086 */
1087DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1088{
1089 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1090#ifdef IEM_VERIFICATION_MODE_FULL
1091 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
1092#endif
1093#ifdef VBOX_STRICT
1094# ifdef IEM_WITH_CODE_TLB
1095 NOREF(pVCpu);
1096# else
1097 pVCpu->iem.s.cbOpcode = 0;
1098# endif
1099#else
1100 NOREF(pVCpu);
1101#endif
1102}
1103
1104
1105/**
1106 * Initializes the decoder state.
1107 *
1108 * iemReInitDecoder is mostly a copy of this function.
1109 *
1110 * @param pVCpu The cross context virtual CPU structure of the
1111 * calling thread.
1112 * @param fBypassHandlers Whether to bypass access handlers.
1113 */
1114DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1115{
1116 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1117
1118 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1119
1120#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1121 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1122 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1123 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1124 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1125 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1126 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1127 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1128 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1129#endif
1130
1131#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1132 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1133#endif
1134 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1135#ifdef IEM_VERIFICATION_MODE_FULL
1136 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1137 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1138#endif
1139 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1140 pVCpu->iem.s.enmCpuMode = enmMode;
1141 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1142 pVCpu->iem.s.enmEffAddrMode = enmMode;
1143 if (enmMode != IEMMODE_64BIT)
1144 {
1145 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1146 pVCpu->iem.s.enmEffOpSize = enmMode;
1147 }
1148 else
1149 {
1150 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1151 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1152 }
1153 pVCpu->iem.s.fPrefixes = 0;
1154 pVCpu->iem.s.uRexReg = 0;
1155 pVCpu->iem.s.uRexB = 0;
1156 pVCpu->iem.s.uRexIndex = 0;
1157 pVCpu->iem.s.idxPrefix = 0;
1158 pVCpu->iem.s.uVex3rdReg = 0;
1159 pVCpu->iem.s.uVexLength = 0;
1160 pVCpu->iem.s.fEvexStuff = 0;
1161 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1162#ifdef IEM_WITH_CODE_TLB
1163 pVCpu->iem.s.pbInstrBuf = NULL;
1164 pVCpu->iem.s.offInstrNextByte = 0;
1165 pVCpu->iem.s.offCurInstrStart = 0;
1166# ifdef VBOX_STRICT
1167 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1168 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1169 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1170# endif
1171#else
1172 pVCpu->iem.s.offOpcode = 0;
1173 pVCpu->iem.s.cbOpcode = 0;
1174#endif
1175 pVCpu->iem.s.cActiveMappings = 0;
1176 pVCpu->iem.s.iNextMapping = 0;
1177 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1178 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1179#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1180 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1181 && pCtx->cs.u64Base == 0
1182 && pCtx->cs.u32Limit == UINT32_MAX
1183 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1184 if (!pVCpu->iem.s.fInPatchCode)
1185 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1186#endif
1187
1188#ifdef DBGFTRACE_ENABLED
1189 switch (enmMode)
1190 {
1191 case IEMMODE_64BIT:
1192 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1193 break;
1194 case IEMMODE_32BIT:
1195 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1196 break;
1197 case IEMMODE_16BIT:
1198 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1199 break;
1200 }
1201#endif
1202}
1203
1204
1205/**
1206 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1207 *
1208 * This is mostly a copy of iemInitDecoder.
1209 *
1210 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1211 */
1212DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1213{
1214 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1215
1216 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1217
1218#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1219 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1220 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1221 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1222 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1223 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1224 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1225 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1226 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1227#endif
1228
1229 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1230#ifdef IEM_VERIFICATION_MODE_FULL
1231 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1232 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1233#endif
1234 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1235 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1236 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1237 pVCpu->iem.s.enmEffAddrMode = enmMode;
1238 if (enmMode != IEMMODE_64BIT)
1239 {
1240 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1241 pVCpu->iem.s.enmEffOpSize = enmMode;
1242 }
1243 else
1244 {
1245 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1246 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1247 }
1248 pVCpu->iem.s.fPrefixes = 0;
1249 pVCpu->iem.s.uRexReg = 0;
1250 pVCpu->iem.s.uRexB = 0;
1251 pVCpu->iem.s.uRexIndex = 0;
1252 pVCpu->iem.s.idxPrefix = 0;
1253 pVCpu->iem.s.uVex3rdReg = 0;
1254 pVCpu->iem.s.uVexLength = 0;
1255 pVCpu->iem.s.fEvexStuff = 0;
1256 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1257#ifdef IEM_WITH_CODE_TLB
1258 if (pVCpu->iem.s.pbInstrBuf)
1259 {
1260 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1261 - pVCpu->iem.s.uInstrBufPc;
1262 if (off < pVCpu->iem.s.cbInstrBufTotal)
1263 {
1264 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1265 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1266 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1267 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1268 else
1269 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1270 }
1271 else
1272 {
1273 pVCpu->iem.s.pbInstrBuf = NULL;
1274 pVCpu->iem.s.offInstrNextByte = 0;
1275 pVCpu->iem.s.offCurInstrStart = 0;
1276 pVCpu->iem.s.cbInstrBuf = 0;
1277 pVCpu->iem.s.cbInstrBufTotal = 0;
1278 }
1279 }
1280 else
1281 {
1282 pVCpu->iem.s.offInstrNextByte = 0;
1283 pVCpu->iem.s.offCurInstrStart = 0;
1284 pVCpu->iem.s.cbInstrBuf = 0;
1285 pVCpu->iem.s.cbInstrBufTotal = 0;
1286 }
1287#else
1288 pVCpu->iem.s.cbOpcode = 0;
1289 pVCpu->iem.s.offOpcode = 0;
1290#endif
1291 Assert(pVCpu->iem.s.cActiveMappings == 0);
1292 pVCpu->iem.s.iNextMapping = 0;
1293 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1294 Assert(pVCpu->iem.s.fBypassHandlers == false);
1295#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1296 if (!pVCpu->iem.s.fInPatchCode)
1297 { /* likely */ }
1298 else
1299 {
1300 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1301 && pCtx->cs.u64Base == 0
1302 && pCtx->cs.u32Limit == UINT32_MAX
1303 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1304 if (!pVCpu->iem.s.fInPatchCode)
1305 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1306 }
1307#endif
1308
1309#ifdef DBGFTRACE_ENABLED
1310 switch (enmMode)
1311 {
1312 case IEMMODE_64BIT:
1313 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1314 break;
1315 case IEMMODE_32BIT:
1316 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1317 break;
1318 case IEMMODE_16BIT:
1319 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1320 break;
1321 }
1322#endif
1323}
1324
1325
1326
1327/**
1328 * Prefetches opcodes when starting execution for the first time.
1329 *
1330 * @returns Strict VBox status code.
1331 * @param pVCpu The cross context virtual CPU structure of the
1332 * calling thread.
1333 * @param fBypassHandlers Whether to bypass access handlers.
1334 */
1335IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1336{
1337#ifdef IEM_VERIFICATION_MODE_FULL
1338 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1339#endif
1340 iemInitDecoder(pVCpu, fBypassHandlers);
1341
1342#ifdef IEM_WITH_CODE_TLB
1343 /** @todo Do ITLB lookup here. */
1344
1345#else /* !IEM_WITH_CODE_TLB */
1346
1347 /*
1348 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1349 *
1350 * First translate CS:rIP to a physical address.
1351 */
1352 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1353 uint32_t cbToTryRead;
1354 RTGCPTR GCPtrPC;
1355 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1356 {
1357 cbToTryRead = PAGE_SIZE;
1358 GCPtrPC = pCtx->rip;
1359 if (IEM_IS_CANONICAL(GCPtrPC))
1360 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1361 else
1362 return iemRaiseGeneralProtectionFault0(pVCpu);
1363 }
1364 else
1365 {
1366 uint32_t GCPtrPC32 = pCtx->eip;
1367 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1368 if (GCPtrPC32 <= pCtx->cs.u32Limit)
1369 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1370 else
1371 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1372 if (cbToTryRead) { /* likely */ }
1373 else /* overflowed */
1374 {
1375 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1376 cbToTryRead = UINT32_MAX;
1377 }
1378 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1379 Assert(GCPtrPC <= UINT32_MAX);
1380 }
1381
1382# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1383 /* Allow interpretation of patch manager code blocks since they can for
1384 instance throw #PFs for perfectly good reasons. */
1385 if (pVCpu->iem.s.fInPatchCode)
1386 {
1387 size_t cbRead = 0;
1388 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1389 AssertRCReturn(rc, rc);
1390 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1391 return VINF_SUCCESS;
1392 }
1393# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1394
1395 RTGCPHYS GCPhys;
1396 uint64_t fFlags;
1397 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1398 if (RT_SUCCESS(rc)) { /* probable */ }
1399 else
1400 {
1401 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1402 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1403 }
1404 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1405 else
1406 {
1407 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1408 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1409 }
1410 if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1411 else
1412 {
1413 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1414 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1415 }
1416 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1417 /** @todo Check reserved bits and such stuff. PGM is better at doing
1418 * that, so do it when implementing the guest virtual address
1419 * TLB... */
1420
1421# ifdef IEM_VERIFICATION_MODE_FULL
1422 /*
1423 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1424 * instruction.
1425 */
1426 /** @todo optimize this differently by not using PGMPhysRead. */
1427 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1428 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1429 if ( offPrevOpcodes < cbOldOpcodes
1430 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1431 {
1432 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1433 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1434 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1435 pVCpu->iem.s.cbOpcode = cbNew;
1436 return VINF_SUCCESS;
1437 }
1438# endif
1439
1440 /*
1441 * Read the bytes at this address.
1442 */
1443 PVM pVM = pVCpu->CTX_SUFF(pVM);
1444# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1445 size_t cbActual;
1446 if ( PATMIsEnabled(pVM)
1447 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1448 {
1449 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1450 Assert(cbActual > 0);
1451 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1452 }
1453 else
1454# endif
1455 {
1456 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1457 if (cbToTryRead > cbLeftOnPage)
1458 cbToTryRead = cbLeftOnPage;
1459 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1460 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1461
1462 if (!pVCpu->iem.s.fBypassHandlers)
1463 {
1464 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1465 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1466 { /* likely */ }
1467 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1468 {
1469 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1470 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1471 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1472 }
1473 else
1474 {
1475 Log((RT_SUCCESS(rcStrict)
1476 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1477 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1478 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1479 return rcStrict;
1480 }
1481 }
1482 else
1483 {
1484 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1485 if (RT_SUCCESS(rc))
1486 { /* likely */ }
1487 else
1488 {
1489 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1490 GCPtrPC, GCPhys, cbToTryRead, rc));
1491 return rc;
1492 }
1493 }
1494 pVCpu->iem.s.cbOpcode = cbToTryRead;
1495 }
1496#endif /* !IEM_WITH_CODE_TLB */
1497 return VINF_SUCCESS;
1498}
1499
1500
1501/**
1502 * Invalidates the IEM TLBs.
1503 *
1504 * This is called internally as well as by PGM when moving GC mappings.
1505 *
1507 * @param pVCpu The cross context virtual CPU structure of the calling
1508 * thread.
1509 * @param fVmm Set when PGM calls us with a remapping.
1510 */
1511VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1512{
1513#ifdef IEM_WITH_CODE_TLB
1514 pVCpu->iem.s.cbInstrBufTotal = 0;
1515 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1516 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1517 { /* very likely */ }
1518 else
1519 {
1520 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1521 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1522 while (i-- > 0)
1523 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1524 }
1525#endif
1526
1527#ifdef IEM_WITH_DATA_TLB
1528 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1529 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1530 { /* very likely */ }
1531 else
1532 {
1533 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1534 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1535 while (i-- > 0)
1536 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1537 }
1538#endif
1539 NOREF(pVCpu); NOREF(fVmm);
1540}
1541
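/*
 * Illustrative sketch (comment only) of why bumping uTlbRevision above
 * invalidates every entry: the revision is OR'ed into the tag when an entry
 * is inserted and compared as part of the tag on lookup, roughly like the
 * code TLB lookup further down (GCPtrPage and pTlb are placeholder names):
 *
 *      uint64_t const uTag  = (GCPtrPage >> X86_PAGE_SHIFT) | pTlb->uTlbRevision;
 *      PIEMTLBENTRY   pTlbe = &pTlb->aEntries[(uint8_t)uTag];
 *      bool const     fHit  = pTlbe->uTag == uTag;
 *
 * Entries stored under an older revision can never match again, so the
 * arrays only need scrubbing when the revision counter wraps around to zero.
 */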
1542
1543/**
1544 * Invalidates a page in the TLBs.
1545 *
1546 * @param pVCpu The cross context virtual CPU structure of the calling
1547 * thread.
1548 * @param GCPtr The address of the page to invalidate.
1549 */
1550VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1551{
1552#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1553 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1554 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1555 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1556 uintptr_t idx = (uint8_t)GCPtr;
1557
1558# ifdef IEM_WITH_CODE_TLB
1559 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1560 {
1561 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1562 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1563 pVCpu->iem.s.cbInstrBufTotal = 0;
1564 }
1565# endif
1566
1567# ifdef IEM_WITH_DATA_TLB
1568 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1569 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1570# endif
1571#else
1572 NOREF(pVCpu); NOREF(GCPtr);
1573#endif
1574}
1575
1576
1577/**
1578 * Invalidates the host physical aspects of the IEM TLBs.
1579 *
1580 * This is called internally as well as by PGM when moving GC mappings.
1581 *
1582 * @param pVCpu The cross context virtual CPU structure of the calling
1583 * thread.
1584 */
1585VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1586{
1587#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1588 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1589
1590# ifdef IEM_WITH_CODE_TLB
1591 pVCpu->iem.s.cbInstrBufTotal = 0;
1592# endif
1593 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1594 if (uTlbPhysRev != 0)
1595 {
1596 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1597 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1598 }
1599 else
1600 {
1601 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1602 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1603
1604 unsigned i;
1605# ifdef IEM_WITH_CODE_TLB
1606 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1607 while (i-- > 0)
1608 {
1609 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1610 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1611 }
1612# endif
1613# ifdef IEM_WITH_DATA_TLB
1614 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1615 while (i-- > 0)
1616 {
1617 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1618 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1619 }
1620# endif
1621 }
1622#else
1623 NOREF(pVCpu);
1624#endif
1625}
1626
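/*
 * Illustrative sketch (comment only): unlike the virtual revision above, the
 * physical revision is folded into fFlagsAndPhysRev rather than into the tag,
 * so staleness of the host mapping is detected by masking before comparing
 * (pTlb is a placeholder name):
 *
 *      if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pTlb->uTlbPhysRev)
 *          // pbMappingR3 and the PG_NO_READ/PG_NO_WRITE flags are still valid
 *      else
 *          // redo PGMPhysIemGCPhys2PtrNoLock and refresh pbMappingR3 + flags
 *
 * See the "Look up the physical page info" step in iemOpcodeFetchBytesJmp.
 */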
1627
1628/**
1629 * Invalidates the host physical aspects of the IEM TLBs.
1630 *
1631 * This is called internally as well as by PGM when moving GC mappings.
1632 *
1633 * @param pVM The cross context VM structure.
1634 *
1635 * @remarks Caller holds the PGM lock.
1636 */
1637VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1638{
1639 RT_NOREF_PV(pVM);
1640}
1641
1642#ifdef IEM_WITH_CODE_TLB
1643
1644/**
1645 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception
1646 * (via longjmp) on failure.
1647 *
1648 * We end up here for a number of reasons:
1649 * - pbInstrBuf isn't yet initialized.
1650 * - Advancing beyond the buffer boundary (e.g. cross page).
1651 * - Advancing beyond the CS segment limit.
1652 * - Fetching from non-mappable page (e.g. MMIO).
1653 *
1654 * @param pVCpu The cross context virtual CPU structure of the
1655 * calling thread.
1656 * @param pvDst Where to return the bytes.
1657 * @param cbDst Number of bytes to read.
1658 *
1659 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1660 */
1661IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1662{
1663#ifdef IN_RING3
1664//__debugbreak();
1665 for (;;)
1666 {
1667 Assert(cbDst <= 8);
1668 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1669
1670 /*
1671 * We might have a partial buffer match, deal with that first to make the
1672 * rest simpler. This is the first part of the cross page/buffer case.
1673 */
1674 if (pVCpu->iem.s.pbInstrBuf != NULL)
1675 {
1676 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1677 {
1678 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1679 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1680 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1681
1682 cbDst -= cbCopy;
1683 pvDst = (uint8_t *)pvDst + cbCopy;
1684 offBuf += cbCopy;
1685 pVCpu->iem.s.offInstrNextByte += cbCopy;
1686 }
1687 }
1688
1689 /*
1690 * Check segment limit, figuring how much we're allowed to access at this point.
1691 *
1692 * We will fault immediately if RIP is past the segment limit / in non-canonical
1693 * territory. If we do continue, there are one or more bytes to read before we
1694 * end up in trouble and we need to do that first before faulting.
1695 */
1696 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1697 RTGCPTR GCPtrFirst;
1698 uint32_t cbMaxRead;
1699 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1700 {
1701 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1702 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1703 { /* likely */ }
1704 else
1705 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1706 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1707 }
1708 else
1709 {
1710 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1711 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1712 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1713 { /* likely */ }
1714 else
1715 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1716 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1717 if (cbMaxRead != 0)
1718 { /* likely */ }
1719 else
1720 {
1721 /* Overflowed because address is 0 and limit is max. */
1722 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1723 cbMaxRead = X86_PAGE_SIZE;
1724 }
1725 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1726 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1727 if (cbMaxRead2 < cbMaxRead)
1728 cbMaxRead = cbMaxRead2;
1729 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1730 }
1731
1732 /*
1733 * Get the TLB entry for this piece of code.
1734 */
1735 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1736 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1737 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1738 if (pTlbe->uTag == uTag)
1739 {
1740 /* likely when executing lots of code, otherwise unlikely */
1741# ifdef VBOX_WITH_STATISTICS
1742 pVCpu->iem.s.CodeTlb.cTlbHits++;
1743# endif
1744 }
1745 else
1746 {
1747 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1748# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1749 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1750 {
1751 pTlbe->uTag = uTag;
1752 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1753 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1754 pTlbe->GCPhys = NIL_RTGCPHYS;
1755 pTlbe->pbMappingR3 = NULL;
1756 }
1757 else
1758# endif
1759 {
1760 RTGCPHYS GCPhys;
1761 uint64_t fFlags;
1762 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1763 if (RT_FAILURE(rc))
1764 {
1765 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1766 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1767 }
1768
1769 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1770 pTlbe->uTag = uTag;
1771 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1772 pTlbe->GCPhys = GCPhys;
1773 pTlbe->pbMappingR3 = NULL;
1774 }
1775 }
1776
1777 /*
1778 * Check TLB page table level access flags.
1779 */
1780 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1781 {
1782 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1783 {
1784 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1785 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1786 }
1787 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1788 {
1789 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1790 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1791 }
1792 }
1793
1794# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1795 /*
1796 * Allow interpretation of patch manager code blocks since they can for
1797 * instance throw #PFs for perfectly good reasons.
1798 */
1799 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1800 { /* likely */ }
1801 else
1802 {
1803 /** @todo Could optimize this a little in ring-3 if we liked. */
1804 size_t cbRead = 0;
1805 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1806 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1807 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1808 return;
1809 }
1810# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1811
1812 /*
1813 * Look up the physical page info if necessary.
1814 */
1815 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1816 { /* not necessary */ }
1817 else
1818 {
1819 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1820 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1821 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1822 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1823 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1824 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1825 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1826 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1827 }
1828
1829# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1830 /*
1831 * Try do a direct read using the pbMappingR3 pointer.
1832 */
1833 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1834 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1835 {
1836 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1837 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1838 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1839 {
1840 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1841 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1842 }
1843 else
1844 {
1845 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1846 Assert(cbInstr < cbMaxRead);
1847 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1848 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1849 }
1850 if (cbDst <= cbMaxRead)
1851 {
1852 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1853 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1854 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1855 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1856 return;
1857 }
1858 pVCpu->iem.s.pbInstrBuf = NULL;
1859
1860 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1861 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1862 }
1863 else
1864# endif
1865#if 0
1866 /*
1867 * If there is no special read handling, we can read a bit more and
1868 * put it in the prefetch buffer.
1869 */
1870 if ( cbDst < cbMaxRead
1871 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1872 {
1873 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1874 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1875 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1876 { /* likely */ }
1877 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1878 {
1879 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1880 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1881 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1882 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1883 }
1884 else
1885 {
1886 Log((RT_SUCCESS(rcStrict)
1887 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1888 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1889 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1890 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1891 }
1892 }
1893 /*
1894 * Special read handling, so only read exactly what's needed.
1895 * This is a highly unlikely scenario.
1896 */
1897 else
1898#endif
1899 {
1900 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1901 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1902 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1903 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1904 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1905 { /* likely */ }
1906 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1907 {
1908 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1909 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1910 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1911 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1912 }
1913 else
1914 {
1915 Log((RT_SUCCESS(rcStrict)
1916 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1917 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1918 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1919 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1920 }
1921 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1922 if (cbToRead == cbDst)
1923 return;
1924 }
1925
1926 /*
1927 * More to read, loop.
1928 */
1929 cbDst -= cbMaxRead;
1930 pvDst = (uint8_t *)pvDst + cbMaxRead;
1931 }
1932#else
1933 RT_NOREF(pvDst, cbDst);
1934 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1935#endif
1936}
1937
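/*
 * Rough summary (informal, comment only) of the instruction buffer state the
 * function above maintains for the fast path in the inline fetchers; see the
 * field documentation in IEMInternal.h for the authoritative descriptions:
 *
 *      pbInstrBuf       - host mapping used for direct reads (NULL forces a refill)
 *      uInstrBufPc      - guest address corresponding to pbInstrBuf[0]
 *      cbInstrBuf       - buffer offset limit for direct fetches of the current instruction
 *      cbInstrBufTotal  - total valid bytes behind pbInstrBuf
 *      offCurInstrStart - offset of the current instruction into the buffer
 *      offInstrNextByte - offset of the next byte the decoder will fetch
 *
 * As long as offInstrNextByte stays below cbInstrBuf, a fetch is simply
 * pbInstrBuf[offInstrNextByte++] (see iemOpcodeGetNextU8Jmp and friends);
 * everything else funnels back into iemOpcodeFetchBytesJmp.
 */
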
1938#else
1939
1940/**
1941 * Tries to fetch at least @a cbMin more opcode bytes, raising the
1942 * appropriate exception if it fails.
1943 *
1944 * @returns Strict VBox status code.
1945 * @param pVCpu The cross context virtual CPU structure of the
1946 * calling thread.
1947 * @param cbMin The minimum number of bytes relative to offOpcode
1948 * that must be read.
1949 */
1950IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1951{
1952 /*
1953 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1954 *
1955 * First translate CS:rIP to a physical address.
1956 */
1957 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1958 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1959 uint32_t cbToTryRead;
1960 RTGCPTR GCPtrNext;
1961 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1962 {
1963 cbToTryRead = PAGE_SIZE;
1964 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1965 if (!IEM_IS_CANONICAL(GCPtrNext))
1966 return iemRaiseGeneralProtectionFault0(pVCpu);
1967 }
1968 else
1969 {
1970 uint32_t GCPtrNext32 = pCtx->eip;
1971 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1972 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1973 if (GCPtrNext32 > pCtx->cs.u32Limit)
1974 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1975 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1976 if (!cbToTryRead) /* overflowed */
1977 {
1978 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1979 cbToTryRead = UINT32_MAX;
1980 /** @todo check out wrapping around the code segment. */
1981 }
1982 if (cbToTryRead < cbMin - cbLeft)
1983 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1984 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1985 }
1986
1987 /* Only read up to the end of the page, and make sure we don't read more
1988 than the opcode buffer can hold. */
1989 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1990 if (cbToTryRead > cbLeftOnPage)
1991 cbToTryRead = cbLeftOnPage;
1992 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1993 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1994/** @todo r=bird: Convert assertion into undefined opcode exception? */
1995 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1996
1997# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1998 /* Allow interpretation of patch manager code blocks since they can for
1999 instance throw #PFs for perfectly good reasons. */
2000 if (pVCpu->iem.s.fInPatchCode)
2001 {
2002 size_t cbRead = 0;
2003 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2004 AssertRCReturn(rc, rc);
2005 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2006 return VINF_SUCCESS;
2007 }
2008# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2009
2010 RTGCPHYS GCPhys;
2011 uint64_t fFlags;
2012 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2013 if (RT_FAILURE(rc))
2014 {
2015 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2016 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2017 }
2018 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2019 {
2020 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2021 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2022 }
2023 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
2024 {
2025 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2026 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2027 }
2028 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2029 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2030 /** @todo Check reserved bits and such stuff. PGM is better at doing
2031 * that, so do it when implementing the guest virtual address
2032 * TLB... */
2033
2034 /*
2035 * Read the bytes at this address.
2036 *
2037 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2038 * and since PATM should only patch the start of an instruction there
2039 * should be no need to check again here.
2040 */
2041 if (!pVCpu->iem.s.fBypassHandlers)
2042 {
2043 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2044 cbToTryRead, PGMACCESSORIGIN_IEM);
2045 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2046 { /* likely */ }
2047 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2048 {
2049 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2050 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2051 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2052 }
2053 else
2054 {
2055 Log((RT_SUCCESS(rcStrict)
2056 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2057 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2058 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2059 return rcStrict;
2060 }
2061 }
2062 else
2063 {
2064 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2065 if (RT_SUCCESS(rc))
2066 { /* likely */ }
2067 else
2068 {
2069 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2070 return rc;
2071 }
2072 }
2073 pVCpu->iem.s.cbOpcode += cbToTryRead;
2074 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2075
2076 return VINF_SUCCESS;
2077}
2078
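/*
 * Rough summary (informal, comment only): without the code TLB the decoder
 * works from the small per-instruction buffer,
 *
 *      abOpcode[0..cbOpcode) - bytes prefetched so far for this instruction
 *      offOpcode             - how many of them the decoder has consumed
 *
 * and the inline fetchers below simply fall back to iemOpcodeFetchMoreBytes()
 * whenever offOpcode plus the requested size would run past cbOpcode.
 */
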
2079#endif /* !IEM_WITH_CODE_TLB */
2080#ifndef IEM_WITH_SETJMP
2081
2082/**
2083 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2084 *
2085 * @returns Strict VBox status code.
2086 * @param pVCpu The cross context virtual CPU structure of the
2087 * calling thread.
2088 * @param pb Where to return the opcode byte.
2089 */
2090DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2091{
2092 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2093 if (rcStrict == VINF_SUCCESS)
2094 {
2095 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2096 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2097 pVCpu->iem.s.offOpcode = offOpcode + 1;
2098 }
2099 else
2100 *pb = 0;
2101 return rcStrict;
2102}
2103
2104
2105/**
2106 * Fetches the next opcode byte.
2107 *
2108 * @returns Strict VBox status code.
2109 * @param pVCpu The cross context virtual CPU structure of the
2110 * calling thread.
2111 * @param pu8 Where to return the opcode byte.
2112 */
2113DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2114{
2115 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2116 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2117 {
2118 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2119 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2120 return VINF_SUCCESS;
2121 }
2122 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2123}
2124
2125#else /* IEM_WITH_SETJMP */
2126
2127/**
2128 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2129 *
2130 * @returns The opcode byte.
2131 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2132 */
2133DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2134{
2135# ifdef IEM_WITH_CODE_TLB
2136 uint8_t u8;
2137 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2138 return u8;
2139# else
2140 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2141 if (rcStrict == VINF_SUCCESS)
2142 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2143 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2144# endif
2145}
2146
2147
2148/**
2149 * Fetches the next opcode byte, longjmp on error.
2150 *
2151 * @returns The opcode byte.
2152 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2153 */
2154DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2155{
2156# ifdef IEM_WITH_CODE_TLB
2157 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2158 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2159 if (RT_LIKELY( pbBuf != NULL
2160 && offBuf < pVCpu->iem.s.cbInstrBuf))
2161 {
2162 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2163 return pbBuf[offBuf];
2164 }
2165# else
2166 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2167 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2168 {
2169 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2170 return pVCpu->iem.s.abOpcode[offOpcode];
2171 }
2172# endif
2173 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2174}
2175
2176#endif /* IEM_WITH_SETJMP */
2177
2178/**
2179 * Fetches the next opcode byte, returns automatically on failure.
2180 *
2181 * @param a_pu8 Where to return the opcode byte.
2182 * @remark Implicitly references pVCpu.
2183 */
2184#ifndef IEM_WITH_SETJMP
2185# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2186 do \
2187 { \
2188 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2189 if (rcStrict2 == VINF_SUCCESS) \
2190 { /* likely */ } \
2191 else \
2192 return rcStrict2; \
2193 } while (0)
2194#else
2195# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2196#endif /* IEM_WITH_SETJMP */
2197
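/*
 * Illustrative use of the macro above from a decoder routine (hypothetical
 * function and variable names, comment only).  In the status-code build the
 * macro expands to an early 'return rcStrict2', so the caller must itself
 * return a VBOXSTRICTRC; in the setjmp build it just assigns and may longjmp:
 *
 *      IEM_STATIC VBOXSTRICTRC iemOpExampleDecode(PVMCPU pVCpu)
 *      {
 *          uint8_t bRm;
 *          IEM_OPCODE_GET_NEXT_U8(&bRm);
 *          // ... decode bRm ...
 *          return VINF_SUCCESS;
 *      }
 */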
2198
2199#ifndef IEM_WITH_SETJMP
2200/**
2201 * Fetches the next signed byte from the opcode stream.
2202 *
2203 * @returns Strict VBox status code.
2204 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2205 * @param pi8 Where to return the signed byte.
2206 */
2207DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2208{
2209 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2210}
2211#endif /* !IEM_WITH_SETJMP */
2212
2213
2214/**
2215 * Fetches the next signed byte from the opcode stream, returning automatically
2216 * on failure.
2217 *
2218 * @param a_pi8 Where to return the signed byte.
2219 * @remark Implicitly references pVCpu.
2220 */
2221#ifndef IEM_WITH_SETJMP
2222# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2223 do \
2224 { \
2225 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2226 if (rcStrict2 != VINF_SUCCESS) \
2227 return rcStrict2; \
2228 } while (0)
2229#else /* IEM_WITH_SETJMP */
2230# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2231
2232#endif /* IEM_WITH_SETJMP */
2233
2234#ifndef IEM_WITH_SETJMP
2235
2236/**
2237 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2238 *
2239 * @returns Strict VBox status code.
2240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2241 * @param pu16 Where to return the opcode word.
2242 */
2243DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2244{
2245 uint8_t u8;
2246 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2247 if (rcStrict == VINF_SUCCESS)
2248 *pu16 = (int8_t)u8;
2249 return rcStrict;
2250}
2251
2252
2253/**
2254 * Fetches the next signed byte from the opcode stream, extending it to
2255 * unsigned 16-bit.
2256 *
2257 * @returns Strict VBox status code.
2258 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2259 * @param pu16 Where to return the unsigned word.
2260 */
2261DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2262{
2263 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2264 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2265 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2266
2267 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2268 pVCpu->iem.s.offOpcode = offOpcode + 1;
2269 return VINF_SUCCESS;
2270}
2271
2272#endif /* !IEM_WITH_SETJMP */
2273
2274/**
2275 * Fetches the next signed byte from the opcode stream, sign-extending it to
2276 * a word, returning automatically on failure.
2277 *
2278 * @param a_pu16 Where to return the word.
2279 * @remark Implicitly references pVCpu.
2280 */
2281#ifndef IEM_WITH_SETJMP
2282# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2283 do \
2284 { \
2285 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2286 if (rcStrict2 != VINF_SUCCESS) \
2287 return rcStrict2; \
2288 } while (0)
2289#else
2290# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2291#endif
2292
2293#ifndef IEM_WITH_SETJMP
2294
2295/**
2296 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2297 *
2298 * @returns Strict VBox status code.
2299 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2300 * @param pu32 Where to return the opcode dword.
2301 */
2302DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2303{
2304 uint8_t u8;
2305 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2306 if (rcStrict == VINF_SUCCESS)
2307 *pu32 = (int8_t)u8;
2308 return rcStrict;
2309}
2310
2311
2312/**
2313 * Fetches the next signed byte from the opcode stream, extending it to
2314 * unsigned 32-bit.
2315 *
2316 * @returns Strict VBox status code.
2317 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2318 * @param pu32 Where to return the unsigned dword.
2319 */
2320DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2321{
2322 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2323 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2324 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2325
2326 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2327 pVCpu->iem.s.offOpcode = offOpcode + 1;
2328 return VINF_SUCCESS;
2329}
2330
2331#endif /* !IEM_WITH_SETJMP */
2332
2333/**
2334 * Fetches the next signed byte from the opcode stream, sign-extending it to
2335 * a double word, returning automatically on failure.
2336 *
2337 * @param a_pu32 Where to return the double word.
2338 * @remark Implicitly references pVCpu.
2339 */
2340#ifndef IEM_WITH_SETJMP
2341# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2342 do \
2343 { \
2344 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2345 if (rcStrict2 != VINF_SUCCESS) \
2346 return rcStrict2; \
2347 } while (0)
2348#else
2349# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2350#endif
2351
2352#ifndef IEM_WITH_SETJMP
2353
2354/**
2355 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2356 *
2357 * @returns Strict VBox status code.
2358 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2359 * @param pu64 Where to return the opcode qword.
2360 */
2361DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2362{
2363 uint8_t u8;
2364 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2365 if (rcStrict == VINF_SUCCESS)
2366 *pu64 = (int8_t)u8;
2367 return rcStrict;
2368}
2369
2370
2371/**
2372 * Fetches the next signed byte from the opcode stream, extending it to
2373 * unsigned 64-bit.
2374 *
2375 * @returns Strict VBox status code.
2376 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2377 * @param pu64 Where to return the unsigned qword.
2378 */
2379DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2380{
2381 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2382 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2383 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2384
2385 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2386 pVCpu->iem.s.offOpcode = offOpcode + 1;
2387 return VINF_SUCCESS;
2388}
2389
2390#endif /* !IEM_WITH_SETJMP */
2391
2392
2393/**
2394 * Fetches the next signed byte from the opcode stream, sign-extending it to
2395 * a quad word, returning automatically on failure.
2396 *
2397 * @param a_pu64 Where to return the quad word.
2398 * @remark Implicitly references pVCpu.
2399 */
2400#ifndef IEM_WITH_SETJMP
2401# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2402 do \
2403 { \
2404 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2405 if (rcStrict2 != VINF_SUCCESS) \
2406 return rcStrict2; \
2407 } while (0)
2408#else
2409# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2410#endif
2411
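/*
 * Worked example (comment only): the S8_SX_* fetchers rely on the cast to
 * int8_t followed by implicit sign extension on assignment, so an opcode
 * byte of 0xFE (-2) becomes 0xFFFE as a word, 0xFFFFFFFE as a dword and
 * 0xFFFFFFFFFFFFFFFE as a qword - exactly what sign-extended 8-bit
 * displacements and immediates require.
 */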
2412
2413#ifndef IEM_WITH_SETJMP
2414
2415/**
2416 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2417 *
2418 * @returns Strict VBox status code.
2419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2420 * @param pu16 Where to return the opcode word.
2421 */
2422DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2423{
2424 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2425 if (rcStrict == VINF_SUCCESS)
2426 {
2427 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2428# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2429 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2430# else
2431 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2432# endif
2433 pVCpu->iem.s.offOpcode = offOpcode + 2;
2434 }
2435 else
2436 *pu16 = 0;
2437 return rcStrict;
2438}
2439
2440
2441/**
2442 * Fetches the next opcode word.
2443 *
2444 * @returns Strict VBox status code.
2445 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2446 * @param pu16 Where to return the opcode word.
2447 */
2448DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2449{
2450 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2451 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2452 {
2453 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2454# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2455 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2456# else
2457 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2458# endif
2459 return VINF_SUCCESS;
2460 }
2461 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2462}
2463
2464#else /* IEM_WITH_SETJMP */
2465
2466/**
2467 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
2468 *
2469 * @returns The opcode word.
2470 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2471 */
2472DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2473{
2474# ifdef IEM_WITH_CODE_TLB
2475 uint16_t u16;
2476 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2477 return u16;
2478# else
2479 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2480 if (rcStrict == VINF_SUCCESS)
2481 {
2482 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2483 pVCpu->iem.s.offOpcode += 2;
2484# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2485 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2486# else
2487 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2488# endif
2489 }
2490 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2491# endif
2492}
2493
2494
2495/**
2496 * Fetches the next opcode word, longjmp on error.
2497 *
2498 * @returns The opcode word.
2499 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2500 */
2501DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2502{
2503# ifdef IEM_WITH_CODE_TLB
2504 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2505 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2506 if (RT_LIKELY( pbBuf != NULL
2507 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2508 {
2509 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2510# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2511 return *(uint16_t const *)&pbBuf[offBuf];
2512# else
2513 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2514# endif
2515 }
2516# else
2517 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2518 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2519 {
2520 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2521# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2522 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2523# else
2524 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2525# endif
2526 }
2527# endif
2528 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2529}
2530
2531#endif /* IEM_WITH_SETJMP */
2532
2533
2534/**
2535 * Fetches the next opcode word, returns automatically on failure.
2536 *
2537 * @param a_pu16 Where to return the opcode word.
2538 * @remark Implicitly references pVCpu.
2539 */
2540#ifndef IEM_WITH_SETJMP
2541# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2542 do \
2543 { \
2544 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2545 if (rcStrict2 != VINF_SUCCESS) \
2546 return rcStrict2; \
2547 } while (0)
2548#else
2549# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2550#endif
2551
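/*
 * Note (informal, comment only): the two code paths in the word fetchers
 * above are equivalent on little-endian hosts.  With
 * IEM_USE_UNALIGNED_DATA_ACCESS the value is loaded directly from the opcode
 * bytes, otherwise it is assembled from the guest's little-endian byte
 * order, e.g. for the byte sequence 0x34 0x12:
 *
 *      RT_MAKE_U16(0x34, 0x12) == 0x1234
 *
 * The dword and qword fetchers further down follow the same pattern using
 * RT_MAKE_U32_FROM_U8 and RT_MAKE_U64_FROM_U8.
 */
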
2552#ifndef IEM_WITH_SETJMP
2553
2554/**
2555 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2556 *
2557 * @returns Strict VBox status code.
2558 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2559 * @param pu32 Where to return the opcode double word.
2560 */
2561DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2562{
2563 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2564 if (rcStrict == VINF_SUCCESS)
2565 {
2566 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2567 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2568 pVCpu->iem.s.offOpcode = offOpcode + 2;
2569 }
2570 else
2571 *pu32 = 0;
2572 return rcStrict;
2573}
2574
2575
2576/**
2577 * Fetches the next opcode word, zero extending it to a double word.
2578 *
2579 * @returns Strict VBox status code.
2580 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2581 * @param pu32 Where to return the opcode double word.
2582 */
2583DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2584{
2585 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2586 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2587 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2588
2589 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2590 pVCpu->iem.s.offOpcode = offOpcode + 2;
2591 return VINF_SUCCESS;
2592}
2593
2594#endif /* !IEM_WITH_SETJMP */
2595
2596
2597/**
2598 * Fetches the next opcode word and zero extends it to a double word, returns
2599 * automatically on failure.
2600 *
2601 * @param a_pu32 Where to return the opcode double word.
2602 * @remark Implicitly references pVCpu.
2603 */
2604#ifndef IEM_WITH_SETJMP
2605# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2606 do \
2607 { \
2608 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2609 if (rcStrict2 != VINF_SUCCESS) \
2610 return rcStrict2; \
2611 } while (0)
2612#else
2613# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2614#endif
2615
2616#ifndef IEM_WITH_SETJMP
2617
2618/**
2619 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2620 *
2621 * @returns Strict VBox status code.
2622 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2623 * @param pu64 Where to return the opcode quad word.
2624 */
2625DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2626{
2627 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2628 if (rcStrict == VINF_SUCCESS)
2629 {
2630 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2631 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2632 pVCpu->iem.s.offOpcode = offOpcode + 2;
2633 }
2634 else
2635 *pu64 = 0;
2636 return rcStrict;
2637}
2638
2639
2640/**
2641 * Fetches the next opcode word, zero extending it to a quad word.
2642 *
2643 * @returns Strict VBox status code.
2644 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2645 * @param pu64 Where to return the opcode quad word.
2646 */
2647DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2648{
2649 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2650 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2651 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2652
2653 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2654 pVCpu->iem.s.offOpcode = offOpcode + 2;
2655 return VINF_SUCCESS;
2656}
2657
2658#endif /* !IEM_WITH_SETJMP */
2659
2660/**
2661 * Fetches the next opcode word and zero extends it to a quad word, returns
2662 * automatically on failure.
2663 *
2664 * @param a_pu64 Where to return the opcode quad word.
2665 * @remark Implicitly references pVCpu.
2666 */
2667#ifndef IEM_WITH_SETJMP
2668# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2669 do \
2670 { \
2671 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2672 if (rcStrict2 != VINF_SUCCESS) \
2673 return rcStrict2; \
2674 } while (0)
2675#else
2676# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2677#endif
2678
2679
2680#ifndef IEM_WITH_SETJMP
2681/**
2682 * Fetches the next signed word from the opcode stream.
2683 *
2684 * @returns Strict VBox status code.
2685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2686 * @param pi16 Where to return the signed word.
2687 */
2688DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2689{
2690 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2691}
2692#endif /* !IEM_WITH_SETJMP */
2693
2694
2695/**
2696 * Fetches the next signed word from the opcode stream, returning automatically
2697 * on failure.
2698 *
2699 * @param a_pi16 Where to return the signed word.
2700 * @remark Implicitly references pVCpu.
2701 */
2702#ifndef IEM_WITH_SETJMP
2703# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2704 do \
2705 { \
2706 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2707 if (rcStrict2 != VINF_SUCCESS) \
2708 return rcStrict2; \
2709 } while (0)
2710#else
2711# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2712#endif
2713
2714#ifndef IEM_WITH_SETJMP
2715
2716/**
2717 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2718 *
2719 * @returns Strict VBox status code.
2720 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2721 * @param pu32 Where to return the opcode dword.
2722 */
2723DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2724{
2725 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2726 if (rcStrict == VINF_SUCCESS)
2727 {
2728 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2729# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2730 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2731# else
2732 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2733 pVCpu->iem.s.abOpcode[offOpcode + 1],
2734 pVCpu->iem.s.abOpcode[offOpcode + 2],
2735 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2736# endif
2737 pVCpu->iem.s.offOpcode = offOpcode + 4;
2738 }
2739 else
2740 *pu32 = 0;
2741 return rcStrict;
2742}
2743
2744
2745/**
2746 * Fetches the next opcode dword.
2747 *
2748 * @returns Strict VBox status code.
2749 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2750 * @param pu32 Where to return the opcode double word.
2751 */
2752DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2753{
2754 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2755 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2756 {
2757 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2758# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2759 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2760# else
2761 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2762 pVCpu->iem.s.abOpcode[offOpcode + 1],
2763 pVCpu->iem.s.abOpcode[offOpcode + 2],
2764 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2765# endif
2766 return VINF_SUCCESS;
2767 }
2768 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2769}
2770
2771#else /* IEM_WITH_SETJMP */
2772
2773/**
2774 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2775 *
2776 * @returns The opcode dword.
2777 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2778 */
2779DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2780{
2781# ifdef IEM_WITH_CODE_TLB
2782 uint32_t u32;
2783 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2784 return u32;
2785# else
2786 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2787 if (rcStrict == VINF_SUCCESS)
2788 {
2789 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2790 pVCpu->iem.s.offOpcode = offOpcode + 4;
2791# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2792 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2793# else
2794 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2795 pVCpu->iem.s.abOpcode[offOpcode + 1],
2796 pVCpu->iem.s.abOpcode[offOpcode + 2],
2797 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2798# endif
2799 }
2800 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2801# endif
2802}
2803
2804
2805/**
2806 * Fetches the next opcode dword, longjmp on error.
2807 *
2808 * @returns The opcode dword.
2809 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2810 */
2811DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2812{
2813# ifdef IEM_WITH_CODE_TLB
2814 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2815 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2816 if (RT_LIKELY( pbBuf != NULL
2817 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2818 {
2819 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2820# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2821 return *(uint32_t const *)&pbBuf[offBuf];
2822# else
2823 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2824 pbBuf[offBuf + 1],
2825 pbBuf[offBuf + 2],
2826 pbBuf[offBuf + 3]);
2827# endif
2828 }
2829# else
2830 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2831 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2832 {
2833 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2834# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2835 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2836# else
2837 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2838 pVCpu->iem.s.abOpcode[offOpcode + 1],
2839 pVCpu->iem.s.abOpcode[offOpcode + 2],
2840 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2841# endif
2842 }
2843# endif
2844 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2845}
2846
2847#endif /* IEM_WITH_SETJMP */
2848
2849
2850/**
2851 * Fetches the next opcode dword, returns automatically on failure.
2852 *
2853 * @param a_pu32 Where to return the opcode dword.
2854 * @remark Implicitly references pVCpu.
2855 */
2856#ifndef IEM_WITH_SETJMP
2857# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2858 do \
2859 { \
2860 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2861 if (rcStrict2 != VINF_SUCCESS) \
2862 return rcStrict2; \
2863 } while (0)
2864#else
2865# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2866#endif
2867
2868#ifndef IEM_WITH_SETJMP
2869
2870/**
2871 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2872 *
2873 * @returns Strict VBox status code.
2874 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2875 * @param pu64 Where to return the opcode quad word.
2876 */
2877DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2878{
2879 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2880 if (rcStrict == VINF_SUCCESS)
2881 {
2882 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2883 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2884 pVCpu->iem.s.abOpcode[offOpcode + 1],
2885 pVCpu->iem.s.abOpcode[offOpcode + 2],
2886 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2887 pVCpu->iem.s.offOpcode = offOpcode + 4;
2888 }
2889 else
2890 *pu64 = 0;
2891 return rcStrict;
2892}
2893
2894
2895/**
2896 * Fetches the next opcode dword, zero extending it to a quad word.
2897 *
2898 * @returns Strict VBox status code.
2899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2900 * @param pu64 Where to return the opcode quad word.
2901 */
2902DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2903{
2904 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2905 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2906 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2907
2908 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2909 pVCpu->iem.s.abOpcode[offOpcode + 1],
2910 pVCpu->iem.s.abOpcode[offOpcode + 2],
2911 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2912 pVCpu->iem.s.offOpcode = offOpcode + 4;
2913 return VINF_SUCCESS;
2914}
2915
2916#endif /* !IEM_WITH_SETJMP */
2917
2918
2919/**
2920 * Fetches the next opcode dword and zero extends it to a quad word, returns
2921 * automatically on failure.
2922 *
2923 * @param a_pu64 Where to return the opcode quad word.
2924 * @remark Implicitly references pVCpu.
2925 */
2926#ifndef IEM_WITH_SETJMP
2927# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2928 do \
2929 { \
2930 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2931 if (rcStrict2 != VINF_SUCCESS) \
2932 return rcStrict2; \
2933 } while (0)
2934#else
2935# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2936#endif
2937
2938
2939#ifndef IEM_WITH_SETJMP
2940/**
2941 * Fetches the next signed double word from the opcode stream.
2942 *
2943 * @returns Strict VBox status code.
2944 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2945 * @param pi32 Where to return the signed double word.
2946 */
2947DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2948{
2949 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2950}
2951#endif
2952
2953/**
2954 * Fetches the next signed double word from the opcode stream, returning
2955 * automatically on failure.
2956 *
2957 * @param a_pi32 Where to return the signed double word.
2958 * @remark Implicitly references pVCpu.
2959 */
2960#ifndef IEM_WITH_SETJMP
2961# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2962 do \
2963 { \
2964 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2965 if (rcStrict2 != VINF_SUCCESS) \
2966 return rcStrict2; \
2967 } while (0)
2968#else
2969# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2970#endif
2971
2972#ifndef IEM_WITH_SETJMP
2973
2974/**
2975 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2976 *
2977 * @returns Strict VBox status code.
2978 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2979 * @param pu64 Where to return the opcode qword.
2980 */
2981DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2982{
2983 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2984 if (rcStrict == VINF_SUCCESS)
2985 {
2986 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2987 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2988 pVCpu->iem.s.abOpcode[offOpcode + 1],
2989 pVCpu->iem.s.abOpcode[offOpcode + 2],
2990 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2991 pVCpu->iem.s.offOpcode = offOpcode + 4;
2992 }
2993 else
2994 *pu64 = 0;
2995 return rcStrict;
2996}
2997
2998
2999/**
3000 * Fetches the next opcode dword, sign extending it into a quad word.
3001 *
3002 * @returns Strict VBox status code.
3003 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3004 * @param pu64 Where to return the opcode quad word.
3005 */
3006DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3007{
3008 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3009 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3010 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3011
3012 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3013 pVCpu->iem.s.abOpcode[offOpcode + 1],
3014 pVCpu->iem.s.abOpcode[offOpcode + 2],
3015 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3016 *pu64 = i32;
3017 pVCpu->iem.s.offOpcode = offOpcode + 4;
3018 return VINF_SUCCESS;
3019}
3020
3021#endif /* !IEM_WITH_SETJMP */
3022
3023
3024/**
3025 * Fetches the next opcode double word and sign extends it to a quad word,
3026 * returns automatically on failure.
3027 *
3028 * @param a_pu64 Where to return the opcode quad word.
3029 * @remark Implicitly references pVCpu.
3030 */
3031#ifndef IEM_WITH_SETJMP
3032# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3033 do \
3034 { \
3035 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3036 if (rcStrict2 != VINF_SUCCESS) \
3037 return rcStrict2; \
3038 } while (0)
3039#else
3040# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3041#endif
3042
3043#ifndef IEM_WITH_SETJMP
3044
3045/**
3046 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3047 *
3048 * @returns Strict VBox status code.
3049 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3050 * @param pu64 Where to return the opcode qword.
3051 */
3052DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3053{
3054 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3055 if (rcStrict == VINF_SUCCESS)
3056 {
3057 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3058# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3059 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3060# else
3061 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3062 pVCpu->iem.s.abOpcode[offOpcode + 1],
3063 pVCpu->iem.s.abOpcode[offOpcode + 2],
3064 pVCpu->iem.s.abOpcode[offOpcode + 3],
3065 pVCpu->iem.s.abOpcode[offOpcode + 4],
3066 pVCpu->iem.s.abOpcode[offOpcode + 5],
3067 pVCpu->iem.s.abOpcode[offOpcode + 6],
3068 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3069# endif
3070 pVCpu->iem.s.offOpcode = offOpcode + 8;
3071 }
3072 else
3073 *pu64 = 0;
3074 return rcStrict;
3075}
3076
3077
3078/**
3079 * Fetches the next opcode qword.
3080 *
3081 * @returns Strict VBox status code.
3082 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3083 * @param pu64 Where to return the opcode qword.
3084 */
3085DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3086{
3087 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3088 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3089 {
3090# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3091 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3092# else
3093 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3094 pVCpu->iem.s.abOpcode[offOpcode + 1],
3095 pVCpu->iem.s.abOpcode[offOpcode + 2],
3096 pVCpu->iem.s.abOpcode[offOpcode + 3],
3097 pVCpu->iem.s.abOpcode[offOpcode + 4],
3098 pVCpu->iem.s.abOpcode[offOpcode + 5],
3099 pVCpu->iem.s.abOpcode[offOpcode + 6],
3100 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3101# endif
3102 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3103 return VINF_SUCCESS;
3104 }
3105 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3106}
3107
3108#else /* IEM_WITH_SETJMP */
3109
3110/**
3111 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3112 *
3113 * @returns The opcode qword.
3114 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3115 */
3116DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3117{
3118# ifdef IEM_WITH_CODE_TLB
3119 uint64_t u64;
3120 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3121 return u64;
3122# else
3123 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3124 if (rcStrict == VINF_SUCCESS)
3125 {
3126 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3127 pVCpu->iem.s.offOpcode = offOpcode + 8;
3128# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3129 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3130# else
3131 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3132 pVCpu->iem.s.abOpcode[offOpcode + 1],
3133 pVCpu->iem.s.abOpcode[offOpcode + 2],
3134 pVCpu->iem.s.abOpcode[offOpcode + 3],
3135 pVCpu->iem.s.abOpcode[offOpcode + 4],
3136 pVCpu->iem.s.abOpcode[offOpcode + 5],
3137 pVCpu->iem.s.abOpcode[offOpcode + 6],
3138 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3139# endif
3140 }
3141 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3142# endif
3143}
3144
3145
3146/**
3147 * Fetches the next opcode qword, longjmp on error.
3148 *
3149 * @returns The opcode qword.
3150 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3151 */
3152DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3153{
3154# ifdef IEM_WITH_CODE_TLB
3155 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3156 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3157 if (RT_LIKELY( pbBuf != NULL
3158 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3159 {
3160 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3161# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3162 return *(uint64_t const *)&pbBuf[offBuf];
3163# else
3164 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3165 pbBuf[offBuf + 1],
3166 pbBuf[offBuf + 2],
3167 pbBuf[offBuf + 3],
3168 pbBuf[offBuf + 4],
3169 pbBuf[offBuf + 5],
3170 pbBuf[offBuf + 6],
3171 pbBuf[offBuf + 7]);
3172# endif
3173 }
3174# else
3175 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3176 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3177 {
3178 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3179# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3180 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3181# else
3182 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3183 pVCpu->iem.s.abOpcode[offOpcode + 1],
3184 pVCpu->iem.s.abOpcode[offOpcode + 2],
3185 pVCpu->iem.s.abOpcode[offOpcode + 3],
3186 pVCpu->iem.s.abOpcode[offOpcode + 4],
3187 pVCpu->iem.s.abOpcode[offOpcode + 5],
3188 pVCpu->iem.s.abOpcode[offOpcode + 6],
3189 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3190# endif
3191 }
3192# endif
3193 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3194}
3195
3196#endif /* IEM_WITH_SETJMP */
3197
3198/**
3199 * Fetches the next opcode quad word, returns automatically on failure.
3200 *
3201 * @param a_pu64 Where to return the opcode quad word.
3202 * @remark Implicitly references pVCpu.
3203 */
3204#ifndef IEM_WITH_SETJMP
3205# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3206 do \
3207 { \
3208 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3209 if (rcStrict2 != VINF_SUCCESS) \
3210 return rcStrict2; \
3211 } while (0)
3212#else
3213# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3214#endif
3215
3216
3217/** @name Misc Worker Functions.
3218 * @{
3219 */
3220
3221/**
3222 * Gets the exception class for the specified exception vector.
3223 *
3224 * @returns The class of the specified exception.
3225 * @param uVector The exception vector.
3226 */
3227IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3228{
3229 Assert(uVector <= X86_XCPT_LAST);
3230 switch (uVector)
3231 {
3232 case X86_XCPT_DE:
3233 case X86_XCPT_TS:
3234 case X86_XCPT_NP:
3235 case X86_XCPT_SS:
3236 case X86_XCPT_GP:
3237 case X86_XCPT_SX: /* AMD only */
3238 return IEMXCPTCLASS_CONTRIBUTORY;
3239
3240 case X86_XCPT_PF:
3241 case X86_XCPT_VE: /* Intel only */
3242 return IEMXCPTCLASS_PAGE_FAULT;
3243
3244 case X86_XCPT_DF:
3245 return IEMXCPTCLASS_DOUBLE_FAULT;
3246 }
3247 return IEMXCPTCLASS_BENIGN;
3248}
3249
3250
3251/**
3252 * Evaluates how to handle an exception caused during delivery of another event
3253 * (exception / interrupt).
3254 *
3255 * @returns How to handle the recursive exception.
3256 * @param pVCpu The cross context virtual CPU structure of the
3257 * calling thread.
3258 * @param fPrevFlags The flags of the previous event.
3259 * @param uPrevVector The vector of the previous event.
3260 * @param fCurFlags The flags of the current exception.
3261 * @param uCurVector The vector of the current exception.
3262 * @param pfXcptRaiseInfo Where to store additional information about the
3263 * exception condition. Optional.
3264 */
3265VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3266 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3267{
3268 /*
3269     * Only CPU exceptions can be raised while delivering other events; software interrupt
3270 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3271 */
3272 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3273 Assert(pVCpu); RT_NOREF(pVCpu);
3274 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3275
3276 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3277 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
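    /* Combination rules handled below (cf. the Intel SDM double-fault conditions):
         benign + anything                        -> raise the current exception (NMI / recursive #AC get special treatment),
         page fault + (page fault | contributory) -> #DF,
         contributory + contributory              -> #DF,
         #DF + (contributory | page fault)        -> triple fault / shutdown. */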
3278 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3279 {
3280 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3281 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3282 {
3283 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3284 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3285 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3286 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3287 {
3288 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3289 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3290 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3291 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3292 uCurVector, IEM_GET_CTX(pVCpu)->cr2));
3293 }
3294 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3295 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3296 {
3297 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3298 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3299 }
3300 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3301 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3302 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3303 {
3304 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3305 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3306 }
3307 }
3308 else
3309 {
3310 if (uPrevVector == X86_XCPT_NMI)
3311 {
3312 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3313 if (uCurVector == X86_XCPT_PF)
3314 {
3315 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3316 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3317 }
3318 }
3319 else if ( uPrevVector == X86_XCPT_AC
3320 && uCurVector == X86_XCPT_AC)
3321 {
3322 enmRaise = IEMXCPTRAISE_CPU_HANG;
3323 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3324 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3325 }
3326 }
3327 }
3328 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3329 {
3330 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3331 if (uCurVector == X86_XCPT_PF)
3332 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3333 }
3334 else
3335 {
3336 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3337 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3338 }
3339
3340 if (pfXcptRaiseInfo)
3341 *pfXcptRaiseInfo = fRaiseInfo;
3342 return enmRaise;
3343}
3344
3345
3346/**
3347 * Enters the CPU shutdown state initiated by a triple fault or other
3348 * unrecoverable conditions.
3349 *
3350 * @returns Strict VBox status code.
3351 * @param pVCpu The cross context virtual CPU structure of the
3352 * calling thread.
3353 */
3354IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3355{
3356 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3357 {
3358 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3359 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3360 }
3361
3362 RT_NOREF(pVCpu);
3363 return VINF_EM_TRIPLE_FAULT;
3364}
3365
3366
3367/**
3368 * Validates a new SS segment.
3369 *
3370 * @returns VBox strict status code.
3371 * @param pVCpu The cross context virtual CPU structure of the
3372 * calling thread.
3373 * @param pCtx The CPU context.
3374 * @param   NewSS           The new SS selector.
3375 * @param uCpl The CPL to load the stack for.
3376 * @param pDesc Where to return the descriptor.
3377 */
3378IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3379{
3380 NOREF(pCtx);
3381
3382 /* Null selectors are not allowed (we're not called for dispatching
3383 interrupts with SS=0 in long mode). */
3384 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3385 {
3386 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3387 return iemRaiseTaskSwitchFault0(pVCpu);
3388 }
3389
3390 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3391 if ((NewSS & X86_SEL_RPL) != uCpl)
3392 {
3393 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3394 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3395 }
3396
3397 /*
3398 * Read the descriptor.
3399 */
3400 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3401 if (rcStrict != VINF_SUCCESS)
3402 return rcStrict;
3403
3404 /*
3405 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3406 */
3407 if (!pDesc->Legacy.Gen.u1DescType)
3408 {
3409 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3410 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3411 }
3412
3413 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3414 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3415 {
3416 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3417 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3418 }
3419 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3420 {
3421 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3422 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3423 }
3424
3425 /* Is it there? */
3426 /** @todo testcase: Is this checked before the canonical / limit check below? */
3427 if (!pDesc->Legacy.Gen.u1Present)
3428 {
3429 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3430 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3431 }
3432
3433 return VINF_SUCCESS;
3434}
3435
3436
3437/**
3438 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3439 * not.
3440 *
3441 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3442 * @param a_pCtx The CPU context.
3443 */
3444#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3445# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3446 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3447 ? (a_pCtx)->eflags.u \
3448 : CPUMRawGetEFlags(a_pVCpu) )
3449#else
3450# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3451 ( (a_pCtx)->eflags.u )
3452#endif
3453
3454/**
3455 * Updates the EFLAGS in the correct manner wrt. PATM.
3456 *
3457 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3458 * @param a_pCtx The CPU context.
3459 * @param a_fEfl The new EFLAGS.
3460 */
3461#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3462# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3463 do { \
3464 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3465 (a_pCtx)->eflags.u = (a_fEfl); \
3466 else \
3467 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3468 } while (0)
3469#else
3470# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3471 do { \
3472 (a_pCtx)->eflags.u = (a_fEfl); \
3473 } while (0)
3474#endif
3475
3476
3477/** @} */
3478
3479/** @name Raising Exceptions.
3480 *
3481 * @{
3482 */
3483
3484
3485/**
3486 * Loads the specified stack far pointer from the TSS.
3487 *
3488 * @returns VBox strict status code.
3489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3490 * @param pCtx The CPU context.
3491 * @param uCpl The CPL to load the stack for.
3492 * @param pSelSS Where to return the new stack segment.
3493 * @param puEsp Where to return the new stack pointer.
3494 */
3495IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3496 PRTSEL pSelSS, uint32_t *puEsp)
3497{
3498 VBOXSTRICTRC rcStrict;
3499 Assert(uCpl < 4);
3500
3501 switch (pCtx->tr.Attr.n.u4Type)
3502 {
3503 /*
3504 * 16-bit TSS (X86TSS16).
3505 */
3506 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3507 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3508 {
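            /* A 16-bit TSS keeps the SP:SS pairs for CPL 0-2 at offset 2, four bytes each (SP first, then SS). */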
3509 uint32_t off = uCpl * 4 + 2;
3510 if (off + 4 <= pCtx->tr.u32Limit)
3511 {
3512 /** @todo check actual access pattern here. */
3513                uint32_t u32Tmp = 0; /* silence a gcc maybe-uninitialized warning */
3514 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3515 if (rcStrict == VINF_SUCCESS)
3516 {
3517 *puEsp = RT_LOWORD(u32Tmp);
3518 *pSelSS = RT_HIWORD(u32Tmp);
3519 return VINF_SUCCESS;
3520 }
3521 }
3522 else
3523 {
3524 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3525 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3526 }
3527 break;
3528 }
3529
3530 /*
3531 * 32-bit TSS (X86TSS32).
3532 */
3533 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3534 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3535 {
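            /* A 32-bit TSS keeps the ESP/SS pairs for CPL 0-2 at offset 4, eight bytes each (ESP first, then SS). */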
3536 uint32_t off = uCpl * 8 + 4;
3537 if (off + 7 <= pCtx->tr.u32Limit)
3538 {
3539/** @todo check actual access pattern here. */
3540 uint64_t u64Tmp;
3541 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3542 if (rcStrict == VINF_SUCCESS)
3543 {
3544 *puEsp = u64Tmp & UINT32_MAX;
3545 *pSelSS = (RTSEL)(u64Tmp >> 32);
3546 return VINF_SUCCESS;
3547 }
3548 }
3549 else
3550 {
3551                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3552 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3553 }
3554 break;
3555 }
3556
3557 default:
3558 AssertFailed();
3559 rcStrict = VERR_IEM_IPE_4;
3560 break;
3561 }
3562
3563 *puEsp = 0; /* make gcc happy */
3564 *pSelSS = 0; /* make gcc happy */
3565 return rcStrict;
3566}
3567
3568
3569/**
3570 * Loads the specified stack pointer from the 64-bit TSS.
3571 *
3572 * @returns VBox strict status code.
3573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3574 * @param pCtx The CPU context.
3575 * @param uCpl The CPL to load the stack for.
3576 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3577 * @param puRsp Where to return the new stack pointer.
3578 */
3579IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3580{
3581 Assert(uCpl < 4);
3582 Assert(uIst < 8);
3583 *puRsp = 0; /* make gcc happy */
3584
3585 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3586
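    /* In the 64-bit TSS the RSP0..RSP2 fields start at offset 4 and the IST1..IST7 fields
       follow at offset 0x24; each entry is eight bytes. */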
3587 uint32_t off;
3588 if (uIst)
3589 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3590 else
3591 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3592 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3593 {
3594 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3595 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3596 }
3597
3598 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3599}
3600
3601
3602/**
3603 * Adjust the CPU state according to the exception being raised.
3604 *
3605 * @param pCtx The CPU context.
3606 * @param u8Vector The exception that has been raised.
3607 */
3608DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3609{
3610 switch (u8Vector)
3611 {
3612 case X86_XCPT_DB:
3613 pCtx->dr[7] &= ~X86_DR7_GD;
3614 break;
3615 /** @todo Read the AMD and Intel exception reference... */
3616 }
3617}
3618
3619
3620/**
3621 * Implements exceptions and interrupts for real mode.
3622 *
3623 * @returns VBox strict status code.
3624 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3625 * @param pCtx The CPU context.
3626 * @param cbInstr The number of bytes to offset rIP by in the return
3627 * address.
3628 * @param u8Vector The interrupt / exception vector number.
3629 * @param fFlags The flags.
3630 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3631 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3632 */
3633IEM_STATIC VBOXSTRICTRC
3634iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3635 PCPUMCTX pCtx,
3636 uint8_t cbInstr,
3637 uint8_t u8Vector,
3638 uint32_t fFlags,
3639 uint16_t uErr,
3640 uint64_t uCr2)
3641{
3642 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3643 NOREF(uErr); NOREF(uCr2);
3644
3645 /*
3646 * Read the IDT entry.
3647 */
3648 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3649 {
3650 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3651 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3652 }
3653 RTFAR16 Idte;
3654 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3655 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3656 return rcStrict;
3657
3658 /*
3659 * Push the stack frame.
3660 */
3661 uint16_t *pu16Frame;
3662 uint64_t uNewRsp;
3663 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3664 if (rcStrict != VINF_SUCCESS)
3665 return rcStrict;
3666
3667 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3668#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3669 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3670 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3671 fEfl |= UINT16_C(0xf000);
3672#endif
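    /* The frame in memory, from low to high address: return IP, CS, FLAGS. */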
3673 pu16Frame[2] = (uint16_t)fEfl;
3674 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3675 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3676 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3677 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3678 return rcStrict;
3679
3680 /*
3681 * Load the vector address into cs:ip and make exception specific state
3682 * adjustments.
3683 */
3684 pCtx->cs.Sel = Idte.sel;
3685 pCtx->cs.ValidSel = Idte.sel;
3686 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3687 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3688 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3689 pCtx->rip = Idte.off;
3690 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3691 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3692
3693 /** @todo do we actually do this in real mode? */
3694 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3695 iemRaiseXcptAdjustState(pCtx, u8Vector);
3696
3697 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3698}
3699
3700
3701/**
3702 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3703 *
3704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3705 * @param pSReg Pointer to the segment register.
3706 */
3707IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3708{
3709 pSReg->Sel = 0;
3710 pSReg->ValidSel = 0;
3711 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3712 {
3713        /* VT-x (Intel 3960x) doesn't change the base and limit; it only clears and sets the following attributes. */
3714 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3715 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3716 }
3717 else
3718 {
3719 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3720 /** @todo check this on AMD-V */
3721 pSReg->u64Base = 0;
3722 pSReg->u32Limit = 0;
3723 }
3724}
3725
3726
3727/**
3728 * Loads a segment selector during a task switch in V8086 mode.
3729 *
3730 * @param pSReg Pointer to the segment register.
3731 * @param uSel The selector value to load.
3732 */
3733IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3734{
3735 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3736 pSReg->Sel = uSel;
3737 pSReg->ValidSel = uSel;
3738 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3739 pSReg->u64Base = uSel << 4;
3740 pSReg->u32Limit = 0xffff;
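    /* 0xf3 = present, DPL=3, code/data (S=1), type 3: read/write data, accessed. */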
3741 pSReg->Attr.u = 0xf3;
3742}
3743
3744
3745/**
3746 * Loads a NULL data selector into a selector register, both the hidden and
3747 * visible parts, in protected mode.
3748 *
3749 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3750 * @param pSReg Pointer to the segment register.
3751 * @param uRpl The RPL.
3752 */
3753IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3754{
3755    /** @todo Testcase: write a testcase checking what happens when loading a NULL
3756 * data selector in protected mode. */
3757 pSReg->Sel = uRpl;
3758 pSReg->ValidSel = uRpl;
3759 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3760 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3761 {
3762 /* VT-x (Intel 3960x) observed doing something like this. */
3763 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3764 pSReg->u32Limit = UINT32_MAX;
3765 pSReg->u64Base = 0;
3766 }
3767 else
3768 {
3769 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3770 pSReg->u32Limit = 0;
3771 pSReg->u64Base = 0;
3772 }
3773}
3774
3775
3776/**
3777 * Loads a segment selector during a task switch in protected mode.
3778 *
3779 * In this task switch scenario, we would throw \#TS exceptions rather than
3780 * \#GPs.
3781 *
3782 * @returns VBox strict status code.
3783 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3784 * @param pSReg Pointer to the segment register.
3785 * @param uSel The new selector value.
3786 *
3787 * @remarks This does _not_ handle CS or SS.
3788 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3789 */
3790IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3791{
3792 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3793
3794 /* Null data selector. */
3795 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3796 {
3797 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3798 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3799 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3800 return VINF_SUCCESS;
3801 }
3802
3803 /* Fetch the descriptor. */
3804 IEMSELDESC Desc;
3805 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3806 if (rcStrict != VINF_SUCCESS)
3807 {
3808 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3809 VBOXSTRICTRC_VAL(rcStrict)));
3810 return rcStrict;
3811 }
3812
3813 /* Must be a data segment or readable code segment. */
3814 if ( !Desc.Legacy.Gen.u1DescType
3815 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3816 {
3817 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3818 Desc.Legacy.Gen.u4Type));
3819 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3820 }
3821
3822 /* Check privileges for data segments and non-conforming code segments. */
3823 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3824 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3825 {
3826 /* The RPL and the new CPL must be less than or equal to the DPL. */
3827 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3828 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3829 {
3830 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3831 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3832 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3833 }
3834 }
3835
3836 /* Is it there? */
3837 if (!Desc.Legacy.Gen.u1Present)
3838 {
3839 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3840 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3841 }
3842
3843 /* The base and limit. */
3844 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3845 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3846
3847 /*
3848 * Ok, everything checked out fine. Now set the accessed bit before
3849 * committing the result into the registers.
3850 */
3851 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3852 {
3853 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3854 if (rcStrict != VINF_SUCCESS)
3855 return rcStrict;
3856 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3857 }
3858
3859 /* Commit */
3860 pSReg->Sel = uSel;
3861 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3862 pSReg->u32Limit = cbLimit;
3863 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3864 pSReg->ValidSel = uSel;
3865 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3866 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3867 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3868
3869 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3870 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3871 return VINF_SUCCESS;
3872}
3873
3874
3875/**
3876 * Performs a task switch.
3877 *
3878 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3879 * caller is responsible for performing the necessary checks (like DPL, TSS
3880 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3881 * reference for JMP, CALL, IRET.
3882 *
3883 * If the task switch is due to a software interrupt or hardware exception,
3884 * the caller is responsible for validating the TSS selector and descriptor. See
3885 * Intel Instruction reference for INT n.
3886 *
3887 * @returns VBox strict status code.
3888 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3889 * @param pCtx The CPU context.
3890 * @param enmTaskSwitch What caused this task switch.
3891 * @param uNextEip The EIP effective after the task switch.
3892 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3893 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3894 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3895 * @param SelTSS The TSS selector of the new task.
3896 * @param pNewDescTSS Pointer to the new TSS descriptor.
3897 */
3898IEM_STATIC VBOXSTRICTRC
3899iemTaskSwitch(PVMCPU pVCpu,
3900 PCPUMCTX pCtx,
3901 IEMTASKSWITCH enmTaskSwitch,
3902 uint32_t uNextEip,
3903 uint32_t fFlags,
3904 uint16_t uErr,
3905 uint64_t uCr2,
3906 RTSEL SelTSS,
3907 PIEMSELDESC pNewDescTSS)
3908{
3909 Assert(!IEM_IS_REAL_MODE(pVCpu));
3910 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3911
3912 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3913 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3914 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3915 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3916 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3917
3918 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3919 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3920
3921 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3922 fIsNewTSS386, pCtx->eip, uNextEip));
3923
3924 /* Update CR2 in case it's a page-fault. */
3925 /** @todo This should probably be done much earlier in IEM/PGM. See
3926 * @bugref{5653#c49}. */
3927 if (fFlags & IEM_XCPT_FLAGS_CR2)
3928 pCtx->cr2 = uCr2;
3929
3930 /*
3931 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3932 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3933 */
3934 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3935 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3936 if (uNewTSSLimit < uNewTSSLimitMin)
3937 {
3938 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3939 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3940 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3941 }
3942
3943 /*
3944 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
3945 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
3946 */
3947 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
3948 {
3949 uint32_t const uExitInfo1 = SelTSS;
3950 uint32_t uExitInfo2 = uErr;
3951 switch (enmTaskSwitch)
3952 {
3953 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
3954 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
3955 default: break;
3956 }
3957 if (fFlags & IEM_XCPT_FLAGS_ERR)
3958 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
3959 if (pCtx->eflags.Bits.u1RF)
3960 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
3961
3962 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
3963 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
3964 RT_NOREF2(uExitInfo1, uExitInfo2);
3965 }
3966 /** @todo Nested-VMX task-switch intercept. */
3967
3968 /*
3969 * Check the current TSS limit. The last written byte to the current TSS during the
3970 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3971 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3972 *
3973     * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3974 * end up with smaller than "legal" TSS limits.
3975 */
3976 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3977 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3978 if (uCurTSSLimit < uCurTSSLimitMin)
3979 {
3980 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3981 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3982 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3983 }
3984
3985 /*
3986 * Verify that the new TSS can be accessed and map it. Map only the required contents
3987 * and not the entire TSS.
3988 */
3989 void *pvNewTSS;
3990 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
3991 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
3992 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
3993 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
3994 * not perform correct translation if this happens. See Intel spec. 7.2.1
3995 * "Task-State Segment" */
3996 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
3997 if (rcStrict != VINF_SUCCESS)
3998 {
3999 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4000 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4001 return rcStrict;
4002 }
4003
4004 /*
4005 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4006 */
4007 uint32_t u32EFlags = pCtx->eflags.u32;
4008 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4009 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4010 {
4011 PX86DESC pDescCurTSS;
4012 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4013 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4014 if (rcStrict != VINF_SUCCESS)
4015 {
4016            Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4017 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4018 return rcStrict;
4019 }
4020
4021 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4022 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4023 if (rcStrict != VINF_SUCCESS)
4024 {
4025            Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4026 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4027 return rcStrict;
4028 }
4029
4030 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4031 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4032 {
4033 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4034 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4035 u32EFlags &= ~X86_EFL_NT;
4036 }
4037 }
4038
4039 /*
4040 * Save the CPU state into the current TSS.
4041 */
4042 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
4043 if (GCPtrNewTSS == GCPtrCurTSS)
4044 {
4045 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4046 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4047 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
4048 }
4049 if (fIsNewTSS386)
4050 {
4051 /*
4052 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4053 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4054 */
4055 void *pvCurTSS32;
4056 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
4057 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
4058 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4059 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4060 if (rcStrict != VINF_SUCCESS)
4061 {
4062 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4063 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4064 return rcStrict;
4065 }
4066
4067        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS+cbCurTSS). */
4068 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4069 pCurTSS32->eip = uNextEip;
4070 pCurTSS32->eflags = u32EFlags;
4071 pCurTSS32->eax = pCtx->eax;
4072 pCurTSS32->ecx = pCtx->ecx;
4073 pCurTSS32->edx = pCtx->edx;
4074 pCurTSS32->ebx = pCtx->ebx;
4075 pCurTSS32->esp = pCtx->esp;
4076 pCurTSS32->ebp = pCtx->ebp;
4077 pCurTSS32->esi = pCtx->esi;
4078 pCurTSS32->edi = pCtx->edi;
4079 pCurTSS32->es = pCtx->es.Sel;
4080 pCurTSS32->cs = pCtx->cs.Sel;
4081 pCurTSS32->ss = pCtx->ss.Sel;
4082 pCurTSS32->ds = pCtx->ds.Sel;
4083 pCurTSS32->fs = pCtx->fs.Sel;
4084 pCurTSS32->gs = pCtx->gs.Sel;
4085
4086 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4087 if (rcStrict != VINF_SUCCESS)
4088 {
4089 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4090 VBOXSTRICTRC_VAL(rcStrict)));
4091 return rcStrict;
4092 }
4093 }
4094 else
4095 {
4096 /*
4097 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4098 */
4099 void *pvCurTSS16;
4100 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
4101 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
4102 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4103 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4104 if (rcStrict != VINF_SUCCESS)
4105 {
4106 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4107 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4108 return rcStrict;
4109 }
4110
4111        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS+cbCurTSS). */
4112 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4113 pCurTSS16->ip = uNextEip;
4114 pCurTSS16->flags = u32EFlags;
4115 pCurTSS16->ax = pCtx->ax;
4116 pCurTSS16->cx = pCtx->cx;
4117 pCurTSS16->dx = pCtx->dx;
4118 pCurTSS16->bx = pCtx->bx;
4119 pCurTSS16->sp = pCtx->sp;
4120 pCurTSS16->bp = pCtx->bp;
4121 pCurTSS16->si = pCtx->si;
4122 pCurTSS16->di = pCtx->di;
4123 pCurTSS16->es = pCtx->es.Sel;
4124 pCurTSS16->cs = pCtx->cs.Sel;
4125 pCurTSS16->ss = pCtx->ss.Sel;
4126 pCurTSS16->ds = pCtx->ds.Sel;
4127
4128 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4129 if (rcStrict != VINF_SUCCESS)
4130 {
4131 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4132 VBOXSTRICTRC_VAL(rcStrict)));
4133 return rcStrict;
4134 }
4135 }
4136
4137 /*
4138 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4139 */
4140 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4141 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4142 {
4143 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4144 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4145 pNewTSS->selPrev = pCtx->tr.Sel;
4146 }
4147
4148 /*
4149 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4150 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4151 */
4152 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4153 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4154 bool fNewDebugTrap;
4155 if (fIsNewTSS386)
4156 {
4157 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4158 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4159 uNewEip = pNewTSS32->eip;
4160 uNewEflags = pNewTSS32->eflags;
4161 uNewEax = pNewTSS32->eax;
4162 uNewEcx = pNewTSS32->ecx;
4163 uNewEdx = pNewTSS32->edx;
4164 uNewEbx = pNewTSS32->ebx;
4165 uNewEsp = pNewTSS32->esp;
4166 uNewEbp = pNewTSS32->ebp;
4167 uNewEsi = pNewTSS32->esi;
4168 uNewEdi = pNewTSS32->edi;
4169 uNewES = pNewTSS32->es;
4170 uNewCS = pNewTSS32->cs;
4171 uNewSS = pNewTSS32->ss;
4172 uNewDS = pNewTSS32->ds;
4173 uNewFS = pNewTSS32->fs;
4174 uNewGS = pNewTSS32->gs;
4175 uNewLdt = pNewTSS32->selLdt;
4176 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4177 }
4178 else
4179 {
4180 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4181 uNewCr3 = 0;
4182 uNewEip = pNewTSS16->ip;
4183 uNewEflags = pNewTSS16->flags;
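        /* A 16-bit TSS only holds the low word of each general register; the upper halves are filled with 0xffff here. */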
4184 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4185 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4186 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4187 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4188 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4189 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4190 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4191 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4192 uNewES = pNewTSS16->es;
4193 uNewCS = pNewTSS16->cs;
4194 uNewSS = pNewTSS16->ss;
4195 uNewDS = pNewTSS16->ds;
4196 uNewFS = 0;
4197 uNewGS = 0;
4198 uNewLdt = pNewTSS16->selLdt;
4199 fNewDebugTrap = false;
4200 }
4201
4202 if (GCPtrNewTSS == GCPtrCurTSS)
4203 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4204 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4205
4206 /*
4207 * We're done accessing the new TSS.
4208 */
4209 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4210 if (rcStrict != VINF_SUCCESS)
4211 {
4212 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4213 return rcStrict;
4214 }
4215
4216 /*
4217 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4218 */
4219 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4220 {
4221 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4222 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4223 if (rcStrict != VINF_SUCCESS)
4224 {
4225 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4226 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4227 return rcStrict;
4228 }
4229
4230 /* Check that the descriptor indicates the new TSS is available (not busy). */
4231 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4232 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4233 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4234
4235 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4236 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4237 if (rcStrict != VINF_SUCCESS)
4238 {
4239 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4240 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4241 return rcStrict;
4242 }
4243 }
4244
4245 /*
4246     * From this point on, we're technically in the new task.  Exceptions raised during the rest of the
4247     * switch are deferred until it completes, but are delivered before any instruction executes in the new task.
4248 */
4249 pCtx->tr.Sel = SelTSS;
4250 pCtx->tr.ValidSel = SelTSS;
4251 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
4252 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4253 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4254 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4255 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4256
4257 /* Set the busy bit in TR. */
4258 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4259 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4260 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4261 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4262 {
4263 uNewEflags |= X86_EFL_NT;
4264 }
4265
4266 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4267 pCtx->cr0 |= X86_CR0_TS;
4268 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4269
4270 pCtx->eip = uNewEip;
4271 pCtx->eax = uNewEax;
4272 pCtx->ecx = uNewEcx;
4273 pCtx->edx = uNewEdx;
4274 pCtx->ebx = uNewEbx;
4275 pCtx->esp = uNewEsp;
4276 pCtx->ebp = uNewEbp;
4277 pCtx->esi = uNewEsi;
4278 pCtx->edi = uNewEdi;
4279
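    /* Keep only the architecturally defined EFLAGS bits and force bit 1, which always reads as one. */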
4280 uNewEflags &= X86_EFL_LIVE_MASK;
4281 uNewEflags |= X86_EFL_RA1_MASK;
4282 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
4283
4284 /*
4285 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4286 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4287 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4288 */
4289 pCtx->es.Sel = uNewES;
4290 pCtx->es.Attr.u &= ~X86DESCATTR_P;
4291
4292 pCtx->cs.Sel = uNewCS;
4293 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
4294
4295 pCtx->ss.Sel = uNewSS;
4296 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
4297
4298 pCtx->ds.Sel = uNewDS;
4299 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
4300
4301 pCtx->fs.Sel = uNewFS;
4302 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
4303
4304 pCtx->gs.Sel = uNewGS;
4305 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
4306 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4307
4308 pCtx->ldtr.Sel = uNewLdt;
4309 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4310 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
4311 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4312
4313 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4314 {
4315 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
4316 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
4317 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
4318 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
4319 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
4320 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
4321 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4322 }
4323
4324 /*
4325 * Switch CR3 for the new task.
4326 */
4327 if ( fIsNewTSS386
4328 && (pCtx->cr0 & X86_CR0_PG))
4329 {
4330 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4331 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4332 {
4333 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4334 AssertRCSuccessReturn(rc, rc);
4335 }
4336 else
4337 pCtx->cr3 = uNewCr3;
4338
4339 /* Inform PGM. */
4340 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4341 {
4342 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4343 AssertRCReturn(rc, rc);
4344 /* ignore informational status codes */
4345 }
4346 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4347 }
4348
4349 /*
4350 * Switch LDTR for the new task.
4351 */
4352 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4353 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4354 else
4355 {
4356 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4357
4358 IEMSELDESC DescNewLdt;
4359 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4360 if (rcStrict != VINF_SUCCESS)
4361 {
4362 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4363 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4364 return rcStrict;
4365 }
4366 if ( !DescNewLdt.Legacy.Gen.u1Present
4367 || DescNewLdt.Legacy.Gen.u1DescType
4368 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4369 {
4370 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4371 uNewLdt, DescNewLdt.Legacy.u));
4372 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4373 }
4374
4375 pCtx->ldtr.ValidSel = uNewLdt;
4376 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4377 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4378 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4379 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4380 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4381 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4382 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4383 }
4384
4385 IEMSELDESC DescSS;
4386 if (IEM_IS_V86_MODE(pVCpu))
4387 {
4388 pVCpu->iem.s.uCpl = 3;
4389 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4390 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4391 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4392 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4393 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4394 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4395
4396 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4397 DescSS.Legacy.u = 0;
4398 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4399 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4400 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4401 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4402 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4403 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4404 DescSS.Legacy.Gen.u2Dpl = 3;
4405 }
4406 else
4407 {
4408 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4409
4410 /*
4411 * Load the stack segment for the new task.
4412 */
4413 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4414 {
4415 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4416 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4417 }
4418
4419 /* Fetch the descriptor. */
4420 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4421 if (rcStrict != VINF_SUCCESS)
4422 {
4423 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4424 VBOXSTRICTRC_VAL(rcStrict)));
4425 return rcStrict;
4426 }
4427
4428 /* SS must be a data segment and writable. */
4429 if ( !DescSS.Legacy.Gen.u1DescType
4430 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4431 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4432 {
4433 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4434 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4435 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4436 }
4437
4438 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4439 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4440 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4441 {
4442 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4443 uNewCpl));
4444 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4445 }
4446
4447 /* Is it there? */
4448 if (!DescSS.Legacy.Gen.u1Present)
4449 {
4450 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4451 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4452 }
4453
4454 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4455 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4456
4457 /* Set the accessed bit before committing the result into SS. */
4458 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4459 {
4460 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4461 if (rcStrict != VINF_SUCCESS)
4462 return rcStrict;
4463 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4464 }
4465
4466 /* Commit SS. */
4467 pCtx->ss.Sel = uNewSS;
4468 pCtx->ss.ValidSel = uNewSS;
4469 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4470 pCtx->ss.u32Limit = cbLimit;
4471 pCtx->ss.u64Base = u64Base;
4472 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4473 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4474
4475 /* CPL has changed, update IEM before loading rest of segments. */
4476 pVCpu->iem.s.uCpl = uNewCpl;
4477
4478 /*
4479 * Load the data segments for the new task.
4480 */
4481 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4482 if (rcStrict != VINF_SUCCESS)
4483 return rcStrict;
4484 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4485 if (rcStrict != VINF_SUCCESS)
4486 return rcStrict;
4487 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4488 if (rcStrict != VINF_SUCCESS)
4489 return rcStrict;
4490 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4491 if (rcStrict != VINF_SUCCESS)
4492 return rcStrict;
4493
4494 /*
4495 * Load the code segment for the new task.
4496 */
4497 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4498 {
4499 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4500 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4501 }
4502
4503 /* Fetch the descriptor. */
4504 IEMSELDESC DescCS;
4505 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4506 if (rcStrict != VINF_SUCCESS)
4507 {
4508 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4509 return rcStrict;
4510 }
4511
4512 /* CS must be a code segment. */
4513 if ( !DescCS.Legacy.Gen.u1DescType
4514 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4515 {
4516 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4517 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4518 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4519 }
4520
4521 /* For conforming CS, DPL must be less than or equal to the RPL. */
4522 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4523 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4524 {
4525            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4526 DescCS.Legacy.Gen.u2Dpl));
4527 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4528 }
4529
4530 /* For non-conforming CS, DPL must match RPL. */
4531 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4532 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4533 {
4534            Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4535 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4536 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4537 }
4538
4539 /* Is it there? */
4540 if (!DescCS.Legacy.Gen.u1Present)
4541 {
4542 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4543 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4544 }
4545
4546 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4547 u64Base = X86DESC_BASE(&DescCS.Legacy);
4548
4549 /* Set the accessed bit before committing the result into CS. */
4550 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4551 {
4552 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4553 if (rcStrict != VINF_SUCCESS)
4554 return rcStrict;
4555 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4556 }
4557
4558 /* Commit CS. */
4559 pCtx->cs.Sel = uNewCS;
4560 pCtx->cs.ValidSel = uNewCS;
4561 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4562 pCtx->cs.u32Limit = cbLimit;
4563 pCtx->cs.u64Base = u64Base;
4564 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4565 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4566 }
4567
4568 /** @todo Debug trap. */
4569 if (fIsNewTSS386 && fNewDebugTrap)
4570 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4571
4572 /*
4573 * Construct the error code masks based on what caused this task switch.
4574 * See Intel Instruction reference for INT.
4575 */
4576 uint16_t uExt;
4577 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4578 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4579 {
4580 uExt = 1;
4581 }
4582 else
4583 uExt = 0;
4584
4585 /*
4586 * Push any error code on to the new stack.
4587 */
4588 if (fFlags & IEM_XCPT_FLAGS_ERR)
4589 {
4590 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4591 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4592 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4593
4594 /* Check that there is sufficient space on the stack. */
4595 /** @todo Factor out segment limit checking for normal/expand down segments
4596 * into a separate function. */
4597 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4598 {
4599 if ( pCtx->esp - 1 > cbLimitSS
4600 || pCtx->esp < cbStackFrame)
4601 {
4602 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4603 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4604 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4605 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4606 }
4607 }
4608 else
4609 {
4610 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4611 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4612 {
4613 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4614 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4615 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4616 }
4617 }
4618
4619
4620 if (fIsNewTSS386)
4621 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4622 else
4623 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4624 if (rcStrict != VINF_SUCCESS)
4625 {
4626 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4627 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4628 return rcStrict;
4629 }
4630 }
4631
4632 /* Check the new EIP against the new CS limit. */
4633 if (pCtx->eip > pCtx->cs.u32Limit)
4634 {
4635        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4636 pCtx->eip, pCtx->cs.u32Limit));
4637 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4638 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4639 }
4640
4641 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4642 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4643}
4644
4645
4646/**
4647 * Implements exceptions and interrupts for protected mode.
4648 *
4649 * @returns VBox strict status code.
4650 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4651 * @param pCtx The CPU context.
4652 * @param cbInstr The number of bytes to offset rIP by in the return
4653 * address.
4654 * @param u8Vector The interrupt / exception vector number.
4655 * @param fFlags The flags.
4656 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4657 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4658 */
4659IEM_STATIC VBOXSTRICTRC
4660iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4661 PCPUMCTX pCtx,
4662 uint8_t cbInstr,
4663 uint8_t u8Vector,
4664 uint32_t fFlags,
4665 uint16_t uErr,
4666 uint64_t uCr2)
4667{
4668 /*
4669 * Read the IDT entry.
4670 */
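/* Each protected-mode IDT entry is 8 bytes wide; the IDT limit must cover the last byte of this vector's entry. */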
4671 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4672 {
4673 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4674 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4675 }
4676 X86DESC Idte;
4677 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4678 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4679 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4680 return rcStrict;
4681 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4682 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4683 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4684
4685 /*
4686 * Check the descriptor type, DPL and such.
4687 * ASSUMES this is done in the same order as described for call-gate calls.
4688 */
4689 if (Idte.Gate.u1DescType)
4690 {
4691 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4692 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4693 }
4694 bool fTaskGate = false;
4695 uint8_t f32BitGate = true;
4696 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4697 switch (Idte.Gate.u4Type)
4698 {
4699 case X86_SEL_TYPE_SYS_UNDEFINED:
4700 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4701 case X86_SEL_TYPE_SYS_LDT:
4702 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4703 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4704 case X86_SEL_TYPE_SYS_UNDEFINED2:
4705 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4706 case X86_SEL_TYPE_SYS_UNDEFINED3:
4707 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4708 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4709 case X86_SEL_TYPE_SYS_UNDEFINED4:
4710 {
4711 /** @todo check what actually happens when the type is wrong...
4712 * esp. call gates. */
4713 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4714 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4715 }
4716
4717 case X86_SEL_TYPE_SYS_286_INT_GATE:
4718 f32BitGate = false;
4719 RT_FALL_THRU();
4720 case X86_SEL_TYPE_SYS_386_INT_GATE:
4721 fEflToClear |= X86_EFL_IF;
4722 break;
4723
4724 case X86_SEL_TYPE_SYS_TASK_GATE:
4725 fTaskGate = true;
4726#ifndef IEM_IMPLEMENTS_TASKSWITCH
4727 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4728#endif
4729 break;
4730
4731 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4732 f32BitGate = false; RT_FALL_THRU();
4733 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4734 break;
4735
4736 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4737 }
4738
4739 /* Check DPL against CPL if applicable. */
4740 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4741 {
4742 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4743 {
4744 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4745 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4746 }
4747 }
4748
4749 /* Is it there? */
4750 if (!Idte.Gate.u1Present)
4751 {
4752 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4753 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4754 }
4755
4756 /* Is it a task-gate? */
4757 if (fTaskGate)
4758 {
4759 /*
4760 * Construct the error code masks based on what caused this task switch.
4761 * See Intel Instruction reference for INT.
4762 */
4763 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4764 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4765 RTSEL SelTSS = Idte.Gate.u16Sel;
4766
4767 /*
4768 * Fetch the TSS descriptor in the GDT.
4769 */
4770 IEMSELDESC DescTSS;
4771 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4772 if (rcStrict != VINF_SUCCESS)
4773 {
4774 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4775 VBOXSTRICTRC_VAL(rcStrict)));
4776 return rcStrict;
4777 }
4778
4779 /* The TSS descriptor must be a system segment and be available (not busy). */
4780 if ( DescTSS.Legacy.Gen.u1DescType
4781 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4782 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4783 {
4784 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4785 u8Vector, SelTSS, DescTSS.Legacy.au64));
4786 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4787 }
4788
4789 /* The TSS must be present. */
4790 if (!DescTSS.Legacy.Gen.u1Present)
4791 {
4792 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4793 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4794 }
4795
4796 /* Do the actual task switch. */
4797 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4798 }
4799
4800 /* A null CS is bad. */
4801 RTSEL NewCS = Idte.Gate.u16Sel;
4802 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4803 {
4804 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4805 return iemRaiseGeneralProtectionFault0(pVCpu);
4806 }
4807
4808 /* Fetch the descriptor for the new CS. */
4809 IEMSELDESC DescCS;
4810 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4811 if (rcStrict != VINF_SUCCESS)
4812 {
4813 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4814 return rcStrict;
4815 }
4816
4817 /* Must be a code segment. */
4818 if (!DescCS.Legacy.Gen.u1DescType)
4819 {
4820 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4821 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4822 }
4823 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4824 {
4825 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4826 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4827 }
4828
4829 /* Don't allow lowering the privilege level. */
4830 /** @todo Does the lowering of privileges apply to software interrupts
4831 * only? This has bearings on the more-privileged or
4832 * same-privilege stack behavior further down. A testcase would
4833 * be nice. */
4834 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4835 {
4836 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4837 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4838 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4839 }
4840
4841 /* Make sure the selector is present. */
4842 if (!DescCS.Legacy.Gen.u1Present)
4843 {
4844 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4845 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4846 }
4847
4848 /* Check the new EIP against the new CS limit. */
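/* 286 gates only carry a 16-bit offset; 386 gates combine the low and high offset words into a full 32-bit EIP. */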
4849 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4850 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4851 ? Idte.Gate.u16OffsetLow
4852 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4853 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4854 if (uNewEip > cbLimitCS)
4855 {
4856 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4857 u8Vector, uNewEip, cbLimitCS, NewCS));
4858 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4859 }
4860 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4861
4862 /* Calc the flag image to push. */
4863 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4864 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4865 fEfl &= ~X86_EFL_RF;
4866 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4867 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4868
4869 /* From V8086 mode only go to CPL 0. */
4870 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4871 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4872 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4873 {
4874 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4875 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4876 }
4877
4878 /*
4879 * If the privilege level changes, we need to get a new stack from the TSS.
4880 * This in turn means validating the new SS and ESP...
4881 */
4882 if (uNewCpl != pVCpu->iem.s.uCpl)
4883 {
4884 RTSEL NewSS;
4885 uint32_t uNewEsp;
4886 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4887 if (rcStrict != VINF_SUCCESS)
4888 return rcStrict;
4889
4890 IEMSELDESC DescSS;
4891 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4892 if (rcStrict != VINF_SUCCESS)
4893 return rcStrict;
4894 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4895 if (!DescSS.Legacy.Gen.u1DefBig)
4896 {
4897 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4898 uNewEsp = (uint16_t)uNewEsp;
4899 }
4900
4901 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4902
4903 /* Check that there is sufficient space for the stack frame. */
4904 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
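/* Frame size in bytes: IP/EIP, CS, (E)FLAGS, old SP/ESP and SS make 5 word-sized entries
   (6 with an error code); a V8086 source additionally pushes ES, DS, FS and GS (9/10 entries).
   The shift by f32BitGate doubles the size for 32-bit gates. */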
4905 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4906 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4907 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4908
4909 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4910 {
4911 if ( uNewEsp - 1 > cbLimitSS
4912 || uNewEsp < cbStackFrame)
4913 {
4914 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4915 u8Vector, NewSS, uNewEsp, cbStackFrame));
4916 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4917 }
4918 }
4919 else
4920 {
4921 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4922 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4923 {
4924 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4925 u8Vector, NewSS, uNewEsp, cbStackFrame));
4926 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4927 }
4928 }
4929
4930 /*
4931 * Start making changes.
4932 */
4933
4934 /* Set the new CPL so that stack accesses use it. */
4935 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4936 pVCpu->iem.s.uCpl = uNewCpl;
4937
4938 /* Create the stack frame. */
4939 RTPTRUNION uStackFrame;
4940 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4941 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4942 if (rcStrict != VINF_SUCCESS)
4943 return rcStrict;
4944 void * const pvStackFrame = uStackFrame.pv;
4945 if (f32BitGate)
4946 {
4947 if (fFlags & IEM_XCPT_FLAGS_ERR)
4948 *uStackFrame.pu32++ = uErr;
4949 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4950 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4951 uStackFrame.pu32[2] = fEfl;
4952 uStackFrame.pu32[3] = pCtx->esp;
4953 uStackFrame.pu32[4] = pCtx->ss.Sel;
4954 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
4955 if (fEfl & X86_EFL_VM)
4956 {
4957 uStackFrame.pu32[1] = pCtx->cs.Sel;
4958 uStackFrame.pu32[5] = pCtx->es.Sel;
4959 uStackFrame.pu32[6] = pCtx->ds.Sel;
4960 uStackFrame.pu32[7] = pCtx->fs.Sel;
4961 uStackFrame.pu32[8] = pCtx->gs.Sel;
4962 }
4963 }
4964 else
4965 {
4966 if (fFlags & IEM_XCPT_FLAGS_ERR)
4967 *uStackFrame.pu16++ = uErr;
4968 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4969 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4970 uStackFrame.pu16[2] = fEfl;
4971 uStackFrame.pu16[3] = pCtx->sp;
4972 uStackFrame.pu16[4] = pCtx->ss.Sel;
4973 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
4974 if (fEfl & X86_EFL_VM)
4975 {
4976 uStackFrame.pu16[1] = pCtx->cs.Sel;
4977 uStackFrame.pu16[5] = pCtx->es.Sel;
4978 uStackFrame.pu16[6] = pCtx->ds.Sel;
4979 uStackFrame.pu16[7] = pCtx->fs.Sel;
4980 uStackFrame.pu16[8] = pCtx->gs.Sel;
4981 }
4982 }
4983 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4984 if (rcStrict != VINF_SUCCESS)
4985 return rcStrict;
4986
4987 /* Mark the selectors 'accessed' (hope this is the correct time). */
4988 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4989 * after pushing the stack frame? (Write protect the gdt + stack to
4990 * find out.) */
4991 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4992 {
4993 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4994 if (rcStrict != VINF_SUCCESS)
4995 return rcStrict;
4996 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4997 }
4998
4999 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5000 {
5001 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5002 if (rcStrict != VINF_SUCCESS)
5003 return rcStrict;
5004 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5005 }
5006
5007 /*
5008 * Start committing the register changes (joins with the DPL=CPL branch).
5009 */
5010 pCtx->ss.Sel = NewSS;
5011 pCtx->ss.ValidSel = NewSS;
5012 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5013 pCtx->ss.u32Limit = cbLimitSS;
5014 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5015 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5016 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5017 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5018 * SP is loaded).
5019 * Need to check the other combinations too:
5020 * - 16-bit TSS, 32-bit handler
5021 * - 32-bit TSS, 16-bit handler */
5022 if (!pCtx->ss.Attr.n.u1DefBig)
5023 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
5024 else
5025 pCtx->rsp = uNewEsp - cbStackFrame;
5026
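/* When the interrupt was taken from V8086 mode, ES/DS/FS/GS were pushed above and are now loaded with null selectors so the handler starts with a clean protected-mode segment state. */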
5027 if (fEfl & X86_EFL_VM)
5028 {
5029 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
5030 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
5031 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
5032 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
5033 }
5034 }
5035 /*
5036 * Same privilege, no stack change and smaller stack frame.
5037 */
5038 else
5039 {
5040 uint64_t uNewRsp;
5041 RTPTRUNION uStackFrame;
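/* Same-CPL frame: IP/EIP, CS and (E)FLAGS (3 word-sized entries, 4 with an error code); doubled for 32-bit gates. */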
5042 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
5043 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5044 if (rcStrict != VINF_SUCCESS)
5045 return rcStrict;
5046 void * const pvStackFrame = uStackFrame.pv;
5047
5048 if (f32BitGate)
5049 {
5050 if (fFlags & IEM_XCPT_FLAGS_ERR)
5051 *uStackFrame.pu32++ = uErr;
5052 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5053 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5054 uStackFrame.pu32[2] = fEfl;
5055 }
5056 else
5057 {
5058 if (fFlags & IEM_XCPT_FLAGS_ERR)
5059 *uStackFrame.pu16++ = uErr;
5060 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5061 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5062 uStackFrame.pu16[2] = fEfl;
5063 }
5064 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5065 if (rcStrict != VINF_SUCCESS)
5066 return rcStrict;
5067
5068 /* Mark the CS selector as 'accessed'. */
5069 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5070 {
5071 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5072 if (rcStrict != VINF_SUCCESS)
5073 return rcStrict;
5074 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5075 }
5076
5077 /*
5078 * Start committing the register changes (joins with the other branch).
5079 */
5080 pCtx->rsp = uNewRsp;
5081 }
5082
5083 /* ... register committing continues. */
5084 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5085 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5086 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5087 pCtx->cs.u32Limit = cbLimitCS;
5088 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5089 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5090
5091 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5092 fEfl &= ~fEflToClear;
5093 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5094
5095 if (fFlags & IEM_XCPT_FLAGS_CR2)
5096 pCtx->cr2 = uCr2;
5097
5098 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5099 iemRaiseXcptAdjustState(pCtx, u8Vector);
5100
5101 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5102}
5103
5104
5105/**
5106 * Implements exceptions and interrupts for long mode.
5107 *
5108 * @returns VBox strict status code.
5109 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5110 * @param pCtx The CPU context.
5111 * @param cbInstr The number of bytes to offset rIP by in the return
5112 * address.
5113 * @param u8Vector The interrupt / exception vector number.
5114 * @param fFlags The flags.
5115 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5116 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5117 */
5118IEM_STATIC VBOXSTRICTRC
5119iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5120 PCPUMCTX pCtx,
5121 uint8_t cbInstr,
5122 uint8_t u8Vector,
5123 uint32_t fFlags,
5124 uint16_t uErr,
5125 uint64_t uCr2)
5126{
5127 /*
5128 * Read the IDT entry.
5129 */
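/* Long-mode IDT entries are 16 bytes each, hence the vector is scaled by 16 below. */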
5130 uint16_t offIdt = (uint16_t)u8Vector << 4;
5131 if (pCtx->idtr.cbIdt < offIdt + 7)
5132 {
5133 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
5134 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5135 }
5136 X86DESC64 Idte;
5137 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
5138 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5139 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
5140 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5141 return rcStrict;
5142 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5143 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5144 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5145
5146 /*
5147 * Check the descriptor type, DPL and such.
5148 * ASSUMES this is done in the same order as described for call-gate calls.
5149 */
5150 if (Idte.Gate.u1DescType)
5151 {
5152 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5153 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5154 }
5155 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5156 switch (Idte.Gate.u4Type)
5157 {
5158 case AMD64_SEL_TYPE_SYS_INT_GATE:
5159 fEflToClear |= X86_EFL_IF;
5160 break;
5161 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5162 break;
5163
5164 default:
5165 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5166 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5167 }
5168
5169 /* Check DPL against CPL if applicable. */
5170 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5171 {
5172 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5173 {
5174 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5175 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5176 }
5177 }
5178
5179 /* Is it there? */
5180 if (!Idte.Gate.u1Present)
5181 {
5182 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5183 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5184 }
5185
5186 /* A null CS is bad. */
5187 RTSEL NewCS = Idte.Gate.u16Sel;
5188 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5189 {
5190 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5191 return iemRaiseGeneralProtectionFault0(pVCpu);
5192 }
5193
5194 /* Fetch the descriptor for the new CS. */
5195 IEMSELDESC DescCS;
5196 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5197 if (rcStrict != VINF_SUCCESS)
5198 {
5199 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5200 return rcStrict;
5201 }
5202
5203 /* Must be a 64-bit code segment. */
5204 if (!DescCS.Long.Gen.u1DescType)
5205 {
5206 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5207 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5208 }
5209 if ( !DescCS.Long.Gen.u1Long
5210 || DescCS.Long.Gen.u1DefBig
5211 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5212 {
5213 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5214 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5215 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5216 }
5217
5218 /* Don't allow lowering the privilege level. For non-conforming CS
5219 selectors, the CS.DPL sets the privilege level the trap/interrupt
5220 handler runs at. For conforming CS selectors, the CPL remains
5221 unchanged, but the CS.DPL must be <= CPL. */
5222 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5223 * when CPU in Ring-0. Result \#GP? */
5224 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5225 {
5226 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5227 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5228 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5229 }
5230
5231
5232 /* Make sure the selector is present. */
5233 if (!DescCS.Legacy.Gen.u1Present)
5234 {
5235 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5236 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5237 }
5238
5239 /* Check that the new RIP is canonical. */
5240 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5241 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5242 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5243 if (!IEM_IS_CANONICAL(uNewRip))
5244 {
5245 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5246 return iemRaiseGeneralProtectionFault0(pVCpu);
5247 }
5248
5249 /*
5250 * If the privilege level changes or if the IST isn't zero, we need to get
5251 * a new stack from the TSS.
5252 */
5253 uint64_t uNewRsp;
5254 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5255 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5256 if ( uNewCpl != pVCpu->iem.s.uCpl
5257 || Idte.Gate.u3IST != 0)
5258 {
5259 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5260 if (rcStrict != VINF_SUCCESS)
5261 return rcStrict;
5262 }
5263 else
5264 uNewRsp = pCtx->rsp;
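/* In 64-bit mode the CPU aligns the new stack pointer to a 16-byte boundary before pushing the frame. */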
5265 uNewRsp &= ~(uint64_t)0xf;
5266
5267 /*
5268 * Calc the flag image to push.
5269 */
5270 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
5271 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5272 fEfl &= ~X86_EFL_RF;
5273 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
5274 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5275
5276 /*
5277 * Start making changes.
5278 */
5279 /* Set the new CPL so that stack accesses use it. */
5280 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5281 pVCpu->iem.s.uCpl = uNewCpl;
5282
5283 /* Create the stack frame. */
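/* The 64-bit frame always holds SS, RSP, RFLAGS, CS and RIP (5 qwords), plus the error code when one is supplied; unlike protected mode, SS:RSP is pushed unconditionally. */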
5284 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
5285 RTPTRUNION uStackFrame;
5286 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5287 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5288 if (rcStrict != VINF_SUCCESS)
5289 return rcStrict;
5290 void * const pvStackFrame = uStackFrame.pv;
5291
5292 if (fFlags & IEM_XCPT_FLAGS_ERR)
5293 *uStackFrame.pu64++ = uErr;
5294 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
5295 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5296 uStackFrame.pu64[2] = fEfl;
5297 uStackFrame.pu64[3] = pCtx->rsp;
5298 uStackFrame.pu64[4] = pCtx->ss.Sel;
5299 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5300 if (rcStrict != VINF_SUCCESS)
5301 return rcStrict;
5302
5303 /* Mark the CS selector 'accessed' (hope this is the correct time). */
5304 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5305 * after pushing the stack frame? (Write protect the gdt + stack to
5306 * find out.) */
5307 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5308 {
5309 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5310 if (rcStrict != VINF_SUCCESS)
5311 return rcStrict;
5312 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5313 }
5314
5315 /*
5316 * Start committing the register changes.
5317 */
5318 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
5319 * hidden registers when interrupting 32-bit or 16-bit code! */
5320 if (uNewCpl != uOldCpl)
5321 {
5322 pCtx->ss.Sel = 0 | uNewCpl;
5323 pCtx->ss.ValidSel = 0 | uNewCpl;
5324 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5325 pCtx->ss.u32Limit = UINT32_MAX;
5326 pCtx->ss.u64Base = 0;
5327 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5328 }
5329 pCtx->rsp = uNewRsp - cbStackFrame;
5330 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5331 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5332 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5333 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5334 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5335 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5336 pCtx->rip = uNewRip;
5337
5338 fEfl &= ~fEflToClear;
5339 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5340
5341 if (fFlags & IEM_XCPT_FLAGS_CR2)
5342 pCtx->cr2 = uCr2;
5343
5344 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5345 iemRaiseXcptAdjustState(pCtx, u8Vector);
5346
5347 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5348}
5349
5350
5351/**
5352 * Implements exceptions and interrupts.
5353 *
5354 * All exceptions and interrupts go through this function!
5355 *
5356 * @returns VBox strict status code.
5357 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5358 * @param cbInstr The number of bytes to offset rIP by in the return
5359 * address.
5360 * @param u8Vector The interrupt / exception vector number.
5361 * @param fFlags The flags.
5362 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5363 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5364 */
5365DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5366iemRaiseXcptOrInt(PVMCPU pVCpu,
5367 uint8_t cbInstr,
5368 uint8_t u8Vector,
5369 uint32_t fFlags,
5370 uint16_t uErr,
5371 uint64_t uCr2)
5372{
5373 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5374#ifdef IN_RING0
5375 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5376 AssertRCReturn(rc, rc);
5377#endif
5378
5379#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5380 /*
5381 * Flush prefetch buffer
5382 */
5383 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5384#endif
5385
5386 /*
5387 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5388 */
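/* With IOPL < 3 in V8086 mode a software INT n is upgraded to #GP(0) instead of being dispatched through the IDT (INT3 is exempt, hence the IEM_XCPT_FLAGS_BP_INSTR check). */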
5389 if ( pCtx->eflags.Bits.u1VM
5390 && pCtx->eflags.Bits.u2IOPL != 3
5391 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5392 && (pCtx->cr0 & X86_CR0_PE) )
5393 {
5394 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5395 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5396 u8Vector = X86_XCPT_GP;
5397 uErr = 0;
5398 }
5399#ifdef DBGFTRACE_ENABLED
5400 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5401 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5402 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5403#endif
5404
5405#ifdef VBOX_WITH_NESTED_HWVIRT
5406 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
5407 {
5408 /*
5409 * If the event is being injected as part of VMRUN, it isn't subject to event
5410 * intercepts in the nested-guest. However, secondary exceptions that occur
5411 * during injection of any event -are- subject to exception intercepts.
5412 * See AMD spec. 15.20 "Event Injection".
5413 */
5414 if (!pCtx->hwvirt.svm.fInterceptEvents)
5415 pCtx->hwvirt.svm.fInterceptEvents = 1;
5416 else
5417 {
5418 /*
5419 * Check and handle if the event being raised is intercepted.
5420 */
5421 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, pCtx, u8Vector, fFlags, uErr, uCr2);
5422 if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
5423 return rcStrict0;
5424 }
5425 }
5426#endif /* VBOX_WITH_NESTED_HWVIRT */
5427
5428 /*
5429 * Do recursion accounting.
5430 */
5431 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5432 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5433 if (pVCpu->iem.s.cXcptRecursions == 0)
5434 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5435 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5436 else
5437 {
5438 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5439 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5440 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5441
5442 if (pVCpu->iem.s.cXcptRecursions >= 3)
5443 {
5444#ifdef DEBUG_bird
5445 AssertFailed();
5446#endif
5447 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5448 }
5449
5450 /*
5451 * Evaluate the sequence of recurring events.
5452 */
5453 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5454 NULL /* pXcptRaiseInfo */);
5455 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5456 { /* likely */ }
5457 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5458 {
5459 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5460 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5461 u8Vector = X86_XCPT_DF;
5462 uErr = 0;
5463 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5464 if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5465 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + X86_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5466 }
5467 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5468 {
5469 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5470 return iemInitiateCpuShutdown(pVCpu);
5471 }
5472 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5473 {
5474 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5475 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5476 if (!CPUMIsGuestInNestedHwVirtMode(pCtx))
5477 return VERR_EM_GUEST_CPU_HANG;
5478 }
5479 else
5480 {
5481 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5482 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5483 return VERR_IEM_IPE_9;
5484 }
5485
5486 /*
5487 * The 'EXT' bit is set when an exception occurs during delivery of an external
5488 * event (such as an interrupt or an earlier exception)[1]. The privileged software
5489 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5490 * interrupts and the INTO/INT3 instructions, the 'EXT' bit is not set[3].
5491 *
5492 * [1] - Intel spec. 6.13 "Error Code"
5493 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5494 * [3] - Intel Instruction reference for INT n.
5495 */
5496 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5497 && (fFlags & IEM_XCPT_FLAGS_ERR)
5498 && u8Vector != X86_XCPT_PF
5499 && u8Vector != X86_XCPT_DF)
5500 {
5501 uErr |= X86_TRAP_ERR_EXTERNAL;
5502 }
5503 }
5504
5505 pVCpu->iem.s.cXcptRecursions++;
5506 pVCpu->iem.s.uCurXcpt = u8Vector;
5507 pVCpu->iem.s.fCurXcpt = fFlags;
5508 pVCpu->iem.s.uCurXcptErr = uErr;
5509 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5510
5511 /*
5512 * Extensive logging.
5513 */
5514#if defined(LOG_ENABLED) && defined(IN_RING3)
5515 if (LogIs3Enabled())
5516 {
5517 PVM pVM = pVCpu->CTX_SUFF(pVM);
5518 char szRegs[4096];
5519 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5520 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5521 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5522 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5523 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5524 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5525 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5526 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5527 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5528 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5529 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5530 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5531 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5532 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5533 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5534 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5535 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5536 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5537 " efer=%016VR{efer}\n"
5538 " pat=%016VR{pat}\n"
5539 " sf_mask=%016VR{sf_mask}\n"
5540 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5541 " lstar=%016VR{lstar}\n"
5542 " star=%016VR{star} cstar=%016VR{cstar}\n"
5543 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5544 );
5545
5546 char szInstr[256];
5547 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5548 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5549 szInstr, sizeof(szInstr), NULL);
5550 Log3(("%s%s\n", szRegs, szInstr));
5551 }
5552#endif /* LOG_ENABLED */
5553
5554 /*
5555 * Call the mode specific worker function.
5556 */
5557 VBOXSTRICTRC rcStrict;
5558 if (!(pCtx->cr0 & X86_CR0_PE))
5559 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5560 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5561 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5562 else
5563 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5564
5565 /* Flush the prefetch buffer. */
5566#ifdef IEM_WITH_CODE_TLB
5567 pVCpu->iem.s.pbInstrBuf = NULL;
5568#else
5569 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5570#endif
5571
5572 /*
5573 * Unwind.
5574 */
5575 pVCpu->iem.s.cXcptRecursions--;
5576 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5577 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5578 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5579 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5580 return rcStrict;
5581}
5582
5583#ifdef IEM_WITH_SETJMP
5584/**
5585 * See iemRaiseXcptOrInt. Will not return.
5586 */
5587IEM_STATIC DECL_NO_RETURN(void)
5588iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5589 uint8_t cbInstr,
5590 uint8_t u8Vector,
5591 uint32_t fFlags,
5592 uint16_t uErr,
5593 uint64_t uCr2)
5594{
5595 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5596 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5597}
5598#endif
5599
5600
5601/** \#DE - 00. */
5602DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5603{
5604 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5605}
5606
5607
5608/** \#DB - 01.
5609 * @note This automatically clears DR7.GD. */
5610DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5611{
5612 /** @todo set/clear RF. */
5613 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5614 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5615}
5616
5617
5618/** \#BR - 05. */
5619DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5620{
5621 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5622}
5623
5624
5625/** \#UD - 06. */
5626DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5627{
5628 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5629}
5630
5631
5632/** \#NM - 07. */
5633DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5634{
5635 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5636}
5637
5638
5639/** \#TS(err) - 0a. */
5640DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5641{
5642 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5643}
5644
5645
5646/** \#TS(tr) - 0a. */
5647DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5648{
5649 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5650 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5651}
5652
5653
5654/** \#TS(0) - 0a. */
5655DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5656{
5657 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5658 0, 0);
5659}
5660
5661
5662 /** \#TS(sel) - 0a. */
5663DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5664{
5665 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5666 uSel & X86_SEL_MASK_OFF_RPL, 0);
5667}
5668
5669
5670/** \#NP(err) - 0b. */
5671DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5672{
5673 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5674}
5675
5676
5677/** \#NP(sel) - 0b. */
5678DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5679{
5680 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5681 uSel & ~X86_SEL_RPL, 0);
5682}
5683
5684
5685/** \#SS(seg) - 0c. */
5686DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5687{
5688 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5689 uSel & ~X86_SEL_RPL, 0);
5690}
5691
5692
5693/** \#SS(err) - 0c. */
5694DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5695{
5696 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5697}
5698
5699
5700/** \#GP(n) - 0d. */
5701DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5702{
5703 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5704}
5705
5706
5707/** \#GP(0) - 0d. */
5708DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5709{
5710 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5711}
5712
5713#ifdef IEM_WITH_SETJMP
5714/** \#GP(0) - 0d. */
5715DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5716{
5717 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5718}
5719#endif
5720
5721
5722/** \#GP(sel) - 0d. */
5723DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5724{
5725 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5726 Sel & ~X86_SEL_RPL, 0);
5727}
5728
5729
5730/** \#GP(0) - 0d. */
5731DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5732{
5733 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5734}
5735
5736
5737/** \#GP(sel) - 0d. */
5738DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5739{
5740 NOREF(iSegReg); NOREF(fAccess);
5741 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5742 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5743}
5744
5745#ifdef IEM_WITH_SETJMP
5746/** \#GP(sel) - 0d, longjmp. */
5747DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5748{
5749 NOREF(iSegReg); NOREF(fAccess);
5750 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5751 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5752}
5753#endif
5754
5755/** \#GP(sel) - 0d. */
5756DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5757{
5758 NOREF(Sel);
5759 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5760}
5761
5762#ifdef IEM_WITH_SETJMP
5763/** \#GP(sel) - 0d, longjmp. */
5764DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5765{
5766 NOREF(Sel);
5767 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5768}
5769#endif
5770
5771
5772/** \#GP(sel) - 0d. */
5773DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5774{
5775 NOREF(iSegReg); NOREF(fAccess);
5776 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5777}
5778
5779#ifdef IEM_WITH_SETJMP
5780/** \#GP(sel) - 0d, longjmp. */
5781DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5782 uint32_t fAccess)
5783{
5784 NOREF(iSegReg); NOREF(fAccess);
5785 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5786}
5787#endif
5788
5789
5790/** \#PF(n) - 0e. */
5791DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5792{
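/* Assemble the page-fault error code: P (bit 0), W/R (bit 1), U/S (bit 2) and I/D (bit 4); the RSVD bit (bit 3) is not set here (see the todo below). */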
5793 uint16_t uErr;
5794 switch (rc)
5795 {
5796 case VERR_PAGE_NOT_PRESENT:
5797 case VERR_PAGE_TABLE_NOT_PRESENT:
5798 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5799 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5800 uErr = 0;
5801 break;
5802
5803 default:
5804 AssertMsgFailed(("%Rrc\n", rc));
5805 RT_FALL_THRU();
5806 case VERR_ACCESS_DENIED:
5807 uErr = X86_TRAP_PF_P;
5808 break;
5809
5810 /** @todo reserved */
5811 }
5812
5813 if (pVCpu->iem.s.uCpl == 3)
5814 uErr |= X86_TRAP_PF_US;
5815
5816 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5817 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5818 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5819 uErr |= X86_TRAP_PF_ID;
5820
5821#if 0 /* This is so much non-sense, really. Why was it done like that? */
5822 /* Note! RW access callers reporting a WRITE protection fault, will clear
5823 the READ flag before calling. So, read-modify-write accesses (RW)
5824 can safely be reported as READ faults. */
5825 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5826 uErr |= X86_TRAP_PF_RW;
5827#else
5828 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5829 {
5830 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5831 uErr |= X86_TRAP_PF_RW;
5832 }
5833#endif
5834
5835 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5836 uErr, GCPtrWhere);
5837}
5838
5839#ifdef IEM_WITH_SETJMP
5840/** \#PF(n) - 0e, longjmp. */
5841IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5842{
5843 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5844}
5845#endif
5846
5847
5848/** \#MF(0) - 10. */
5849DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5850{
5851 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5852}
5853
5854
5855/** \#AC(0) - 11. */
5856DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5857{
5858 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5859}
5860
5861
5862/**
5863 * Macro for calling iemCImplRaiseDivideError().
5864 *
5865 * This enables us to add/remove arguments and force different levels of
5866 * inlining as we wish.
5867 *
5868 * @return Strict VBox status code.
5869 */
5870#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5871IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5872{
5873 NOREF(cbInstr);
5874 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5875}
5876
5877
5878/**
5879 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5880 *
5881 * This enables us to add/remove arguments and force different levels of
5882 * inlining as we wish.
5883 *
5884 * @return Strict VBox status code.
5885 */
5886#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5887IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5888{
5889 NOREF(cbInstr);
5890 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5891}
5892
5893
5894/**
5895 * Macro for calling iemCImplRaiseInvalidOpcode().
5896 *
5897 * This enables us to add/remove arguments and force different levels of
5898 * inlining as we wish.
5899 *
5900 * @return Strict VBox status code.
5901 */
5902#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5903IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5904{
5905 NOREF(cbInstr);
5906 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5907}
5908
5909
5910/** @} */
5911
5912
5913/*
5914 *
5915 * Helper routines.
5916 * Helper routines.
5917 * Helper routines.
5918 *
5919 */
5920
5921/**
5922 * Recalculates the effective operand size.
5923 *
5924 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5925 */
5926IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5927{
5928 switch (pVCpu->iem.s.enmCpuMode)
5929 {
5930 case IEMMODE_16BIT:
5931 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5932 break;
5933 case IEMMODE_32BIT:
5934 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5935 break;
5936 case IEMMODE_64BIT:
5937 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5938 {
5939 case 0:
5940 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5941 break;
5942 case IEM_OP_PRF_SIZE_OP:
5943 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5944 break;
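/* REX.W takes precedence over the 0x66 operand-size prefix. */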
5945 case IEM_OP_PRF_SIZE_REX_W:
5946 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5947 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5948 break;
5949 }
5950 break;
5951 default:
5952 AssertFailed();
5953 }
5954}
5955
5956
5957/**
5958 * Sets the default operand size to 64-bit and recalculates the effective
5959 * operand size.
5960 *
5961 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5962 */
5963IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5964{
5965 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5966 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
5967 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5968 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5969 else
5970 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5971}
5972
5973
5974/*
5975 *
5976 * Common opcode decoders.
5977 * Common opcode decoders.
5978 * Common opcode decoders.
5979 *
5980 */
5981//#include <iprt/mem.h>
5982
5983/**
5984 * Used to add extra details about a stub case.
5985 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5986 */
5987IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
5988{
5989#if defined(LOG_ENABLED) && defined(IN_RING3)
5990 PVM pVM = pVCpu->CTX_SUFF(pVM);
5991 char szRegs[4096];
5992 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5993 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5994 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5995 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5996 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5997 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5998 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5999 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6000 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6001 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6002 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6003 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6004 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6005 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6006 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6007 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6008 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6009 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6010 " efer=%016VR{efer}\n"
6011 " pat=%016VR{pat}\n"
6012 " sf_mask=%016VR{sf_mask}\n"
6013 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6014 " lstar=%016VR{lstar}\n"
6015 " star=%016VR{star} cstar=%016VR{cstar}\n"
6016 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6017 );
6018
6019 char szInstr[256];
6020 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6021 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6022 szInstr, sizeof(szInstr), NULL);
6023
6024 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6025#else
6026 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip);
6027#endif
6028}
6029
6030/**
6031 * Complains about a stub.
6032 *
6033 * Two versions of this macro are provided: one for daily use and one for use
6034 * when working on IEM.
6035 */
6036#if 0
6037# define IEMOP_BITCH_ABOUT_STUB() \
6038 do { \
6039 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6040 iemOpStubMsg2(pVCpu); \
6041 RTAssertPanic(); \
6042 } while (0)
6043#else
6044# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6045#endif
6046
6047/** Stubs an opcode. */
6048#define FNIEMOP_STUB(a_Name) \
6049 FNIEMOP_DEF(a_Name) \
6050 { \
6051 RT_NOREF_PV(pVCpu); \
6052 IEMOP_BITCH_ABOUT_STUB(); \
6053 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6054 } \
6055 typedef int ignore_semicolon
6056
6057/** Stubs an opcode. */
6058#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6059 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6060 { \
6061 RT_NOREF_PV(pVCpu); \
6062 RT_NOREF_PV(a_Name0); \
6063 IEMOP_BITCH_ABOUT_STUB(); \
6064 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6065 } \
6066 typedef int ignore_semicolon
6067
6068/** Stubs an opcode which currently should raise \#UD. */
6069#define FNIEMOP_UD_STUB(a_Name) \
6070 FNIEMOP_DEF(a_Name) \
6071 { \
6072 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6073 return IEMOP_RAISE_INVALID_OPCODE(); \
6074 } \
6075 typedef int ignore_semicolon
6076
6077/** Stubs an opcode which currently should raise \#UD. */
6078#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6079 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6080 { \
6081 RT_NOREF_PV(pVCpu); \
6082 RT_NOREF_PV(a_Name0); \
6083 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6084 return IEMOP_RAISE_INVALID_OPCODE(); \
6085 } \
6086 typedef int ignore_semicolon
6087
6088
6089
6090/** @name Register Access.
6091 * @{
6092 */
6093
6094/**
6095 * Gets a reference (pointer) to the specified hidden segment register.
6096 *
6097 * @returns Hidden register reference.
6098 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6099 * @param iSegReg The segment register.
6100 */
6101IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6102{
6103 Assert(iSegReg < X86_SREG_COUNT);
6104 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6105 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
6106
6107#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6108 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6109 { /* likely */ }
6110 else
6111 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6112#else
6113 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6114#endif
6115 return pSReg;
6116}
6117
6118
6119/**
6120 * Ensures that the given hidden segment register is up to date.
6121 *
6122 * @returns Hidden register reference.
6123 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6124 * @param pSReg The segment register.
6125 */
6126IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6127{
6128#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6129 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6130 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6131#else
6132 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6133 NOREF(pVCpu);
6134#endif
6135 return pSReg;
6136}
6137
6138
6139/**
6140 * Gets a reference (pointer) to the specified segment register (the selector
6141 * value).
6142 *
6143 * @returns Pointer to the selector variable.
6144 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6145 * @param iSegReg The segment register.
6146 */
6147DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6148{
6149 Assert(iSegReg < X86_SREG_COUNT);
6150 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6151 return &pCtx->aSRegs[iSegReg].Sel;
6152}
6153
6154
6155/**
6156 * Fetches the selector value of a segment register.
6157 *
6158 * @returns The selector value.
6159 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6160 * @param iSegReg The segment register.
6161 */
6162DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6163{
6164 Assert(iSegReg < X86_SREG_COUNT);
6165 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
6166}
6167
6168
6169/**
6170 * Fetches the base address value of a segment register.
6171 *
6172 * @returns The segment base address.
6173 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6174 * @param iSegReg The segment register.
6175 */
6176DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6177{
6178 Assert(iSegReg < X86_SREG_COUNT);
6179 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].u64Base;
6180}
6181
6182
6183/**
6184 * Gets a reference (pointer) to the specified general purpose register.
6185 *
6186 * @returns Register reference.
6187 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6188 * @param iReg The general purpose register.
6189 */
6190DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6191{
6192 Assert(iReg < 16);
6193 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6194 return &pCtx->aGRegs[iReg];
6195}
6196
6197
6198/**
6199 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6200 *
6201 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6202 *
6203 * @returns Register reference.
6204 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6205 * @param iReg The register.
6206 */
6207DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6208{
6209 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6210 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6211 {
6212 Assert(iReg < 16);
6213 return &pCtx->aGRegs[iReg].u8;
6214 }
6215 /* high 8-bit register. */
6216 Assert(iReg < 8);
6217 return &pCtx->aGRegs[iReg & 3].bHi;
6218}
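
/*
 * For illustration only (hypothetical calls): how the 8-bit register
 * encodings map with and without a REX prefix under the usual AMD64 rules.
 *
 *      iemGRegRefU8(pVCpu, 4);   // no REX:   AH   -> &aGRegs[0].bHi
 *      iemGRegRefU8(pVCpu, 4);   // with REX: SPL  -> &aGRegs[4].u8
 *      iemGRegRefU8(pVCpu, 7);   // no REX:   BH   -> &aGRegs[3].bHi
 *      iemGRegRefU8(pVCpu, 12);  // with REX: R12B -> &aGRegs[12].u8
 */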
6219
6220
6221/**
6222 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6223 *
6224 * @returns Register reference.
6225 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6226 * @param iReg The register.
6227 */
6228DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6229{
6230 Assert(iReg < 16);
6231 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6232 return &pCtx->aGRegs[iReg].u16;
6233}
6234
6235
6236/**
6237 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6238 *
6239 * @returns Register reference.
6240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6241 * @param iReg The register.
6242 */
6243DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6244{
6245 Assert(iReg < 16);
6246 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6247 return &pCtx->aGRegs[iReg].u32;
6248}
6249
6250
6251/**
6252 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6253 *
6254 * @returns Register reference.
6255 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6256 * @param iReg The register.
6257 */
6258DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6259{
6260 Assert(iReg < 16);
6261 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6262 return &pCtx->aGRegs[iReg].u64;
6263}
6264
6265
6266/**
6267 * Gets a reference (pointer) to the specified segment register's base address.
6268 *
6269 * @returns Segment register base address reference.
6270 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6271 * @param iSegReg The segment selector.
6272 */
6273DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6274{
6275 Assert(iSegReg < X86_SREG_COUNT);
6276 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6277 return &pCtx->aSRegs[iSegReg].u64Base;
6278}
6279
6280
6281/**
6282 * Fetches the value of an 8-bit general purpose register.
6283 *
6284 * @returns The register value.
6285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6286 * @param iReg The register.
6287 */
6288DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6289{
6290 return *iemGRegRefU8(pVCpu, iReg);
6291}
6292
6293
6294/**
6295 * Fetches the value of a 16-bit general purpose register.
6296 *
6297 * @returns The register value.
6298 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6299 * @param iReg The register.
6300 */
6301DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6302{
6303 Assert(iReg < 16);
6304 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
6305}
6306
6307
6308/**
6309 * Fetches the value of a 32-bit general purpose register.
6310 *
6311 * @returns The register value.
6312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6313 * @param iReg The register.
6314 */
6315DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6316{
6317 Assert(iReg < 16);
6318 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
6319}
6320
6321
6322/**
6323 * Fetches the value of a 64-bit general purpose register.
6324 *
6325 * @returns The register value.
6326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6327 * @param iReg The register.
6328 */
6329DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6330{
6331 Assert(iReg < 16);
6332 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
6333}
6334
6335
6336/**
6337 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6338 *
6339 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6340 * segment limit.
6341 *
 * @returns Strict VBox status code.
6342 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6343 * @param offNextInstr The offset of the next instruction.
6344 */
6345IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6346{
6347 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6348 switch (pVCpu->iem.s.enmEffOpSize)
6349 {
6350 case IEMMODE_16BIT:
6351 {
6352 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6353 if ( uNewIp > pCtx->cs.u32Limit
6354 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6355 return iemRaiseGeneralProtectionFault0(pVCpu);
6356 pCtx->rip = uNewIp;
6357 break;
6358 }
6359
6360 case IEMMODE_32BIT:
6361 {
6362 Assert(pCtx->rip <= UINT32_MAX);
6363 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6364
6365 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6366 if (uNewEip > pCtx->cs.u32Limit)
6367 return iemRaiseGeneralProtectionFault0(pVCpu);
6368 pCtx->rip = uNewEip;
6369 break;
6370 }
6371
6372 case IEMMODE_64BIT:
6373 {
6374 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6375
6376 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6377 if (!IEM_IS_CANONICAL(uNewRip))
6378 return iemRaiseGeneralProtectionFault0(pVCpu);
6379 pCtx->rip = uNewRip;
6380 break;
6381 }
6382
6383 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6384 }
6385
6386 pCtx->eflags.Bits.u1RF = 0;
6387
6388#ifndef IEM_WITH_CODE_TLB
6389 /* Flush the prefetch buffer. */
6390 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6391#endif
6392
6393 return VINF_SUCCESS;
6394}
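
/*
 * For illustration only (hypothetical values): a short JMP computes the new
 * IP as "current IP + instruction length + signed displacement", truncated
 * to the effective operand size before the limit check.
 *
 *      ip = 0xfffe, cbInstr = 2, offNextInstr = +4
 *      uNewIp = (uint16_t)(0xfffe + 4 + 2) = 0x0004      // wraps at 64K
 *      // uNewIp is then checked against cs.u32Limit before rip is updated.
 */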
6395
6396
6397/**
6398 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6399 *
6400 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6401 * segment limit.
6402 *
6403 * @returns Strict VBox status code.
6404 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6405 * @param offNextInstr The offset of the next instruction.
6406 */
6407IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6408{
6409 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6410 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6411
6412 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6413 if ( uNewIp > pCtx->cs.u32Limit
6414 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6415 return iemRaiseGeneralProtectionFault0(pVCpu);
6416 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6417 pCtx->rip = uNewIp;
6418 pCtx->eflags.Bits.u1RF = 0;
6419
6420#ifndef IEM_WITH_CODE_TLB
6421 /* Flush the prefetch buffer. */
6422 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6423#endif
6424
6425 return VINF_SUCCESS;
6426}
6427
6428
6429/**
6430 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6431 *
6432 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6433 * segment limit.
6434 *
6435 * @returns Strict VBox status code.
6436 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6437 * @param offNextInstr The offset of the next instruction.
6438 */
6439IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6440{
6441 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6442 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6443
6444 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6445 {
6446 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6447
6448 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6449 if (uNewEip > pCtx->cs.u32Limit)
6450 return iemRaiseGeneralProtectionFault0(pVCpu);
6451 pCtx->rip = uNewEip;
6452 }
6453 else
6454 {
6455 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6456
6457 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6458 if (!IEM_IS_CANONICAL(uNewRip))
6459 return iemRaiseGeneralProtectionFault0(pVCpu);
6460 pCtx->rip = uNewRip;
6461 }
6462 pCtx->eflags.Bits.u1RF = 0;
6463
6464#ifndef IEM_WITH_CODE_TLB
6465 /* Flush the prefetch buffer. */
6466 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6467#endif
6468
6469 return VINF_SUCCESS;
6470}
6471
6472
6473/**
6474 * Performs a near jump to the specified address.
6475 *
6476 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6477 * segment limit.
6478 *
 * @returns Strict VBox status code.
6479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6480 * @param uNewRip The new RIP value.
6481 */
6482IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6483{
6484 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6485 switch (pVCpu->iem.s.enmEffOpSize)
6486 {
6487 case IEMMODE_16BIT:
6488 {
6489 Assert(uNewRip <= UINT16_MAX);
6490 if ( uNewRip > pCtx->cs.u32Limit
6491 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6492 return iemRaiseGeneralProtectionFault0(pVCpu);
6493 /** @todo Test 16-bit jump in 64-bit mode. */
6494 pCtx->rip = uNewRip;
6495 break;
6496 }
6497
6498 case IEMMODE_32BIT:
6499 {
6500 Assert(uNewRip <= UINT32_MAX);
6501 Assert(pCtx->rip <= UINT32_MAX);
6502 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6503
6504 if (uNewRip > pCtx->cs.u32Limit)
6505 return iemRaiseGeneralProtectionFault0(pVCpu);
6506 pCtx->rip = uNewRip;
6507 break;
6508 }
6509
6510 case IEMMODE_64BIT:
6511 {
6512 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6513
6514 if (!IEM_IS_CANONICAL(uNewRip))
6515 return iemRaiseGeneralProtectionFault0(pVCpu);
6516 pCtx->rip = uNewRip;
6517 break;
6518 }
6519
6520 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6521 }
6522
6523 pCtx->eflags.Bits.u1RF = 0;
6524
6525#ifndef IEM_WITH_CODE_TLB
6526 /* Flush the prefetch buffer. */
6527 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6528#endif
6529
6530 return VINF_SUCCESS;
6531}
6532
6533
6534/**
6535 * Gets the address of the top of the stack.
6536 *
 * @returns The effective stack pointer value (top-of-stack address).
6537 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6538 * @param pCtx The CPU context from which SP/ESP/RSP should be
6539 * read.
6540 */
6541DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6542{
6543 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6544 return pCtx->rsp;
6545 if (pCtx->ss.Attr.n.u1DefBig)
6546 return pCtx->esp;
6547 return pCtx->sp;
6548}
6549
6550
6551/**
6552 * Updates the RIP/EIP/IP to point to the next instruction.
6553 *
6554 * This function leaves the EFLAGS.RF flag alone.
6555 *
6556 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6557 * @param cbInstr The number of bytes to add.
6558 */
6559IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6560{
6561 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6562 switch (pVCpu->iem.s.enmCpuMode)
6563 {
6564 case IEMMODE_16BIT:
6565 Assert(pCtx->rip <= UINT16_MAX);
6566 pCtx->eip += cbInstr;
6567 pCtx->eip &= UINT32_C(0xffff);
6568 break;
6569
6570 case IEMMODE_32BIT:
6571 pCtx->eip += cbInstr;
6572 Assert(pCtx->rip <= UINT32_MAX);
6573 break;
6574
6575 case IEMMODE_64BIT:
6576 pCtx->rip += cbInstr;
6577 break;
6578 default: AssertFailed();
6579 }
6580}
6581
6582
6583#if 0
6584/**
6585 * Updates the RIP/EIP/IP to point to the next instruction.
6586 *
6587 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6588 */
6589IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6590{
6591 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6592}
6593#endif
6594
6595
6596
6597/**
6598 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6599 *
6600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6601 * @param cbInstr The number of bytes to add.
6602 */
6603IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6604{
6605 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6606
6607 pCtx->eflags.Bits.u1RF = 0;
6608
6609 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6610#if ARCH_BITS >= 64
6611 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6612 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6613 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6614#else
6615 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6616 pCtx->rip += cbInstr;
6617 else
6618 pCtx->eip += cbInstr;
6619#endif
6620}
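
/*
 * For illustration only: the mask table folds the mode check into a single
 * AND; 16-bit and 32-bit code both use the 32-bit mask (the assertion above
 * guarantees RIP already fits), while 64-bit code keeps all bits.
 *
 *      enmCpuMode = IEMMODE_32BIT, rip = 0x00401000, cbInstr = 5
 *      rip = (0x00401000 + 5) & 0xffffffff = 0x00401005
 */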
6621
6622
6623/**
6624 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6625 *
6626 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6627 */
6628IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6629{
6630 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6631}
6632
6633
6634/**
6635 * Adds to the stack pointer.
6636 *
6637 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6638 * @param pCtx The CPU context which SP/ESP/RSP should be
6639 * updated.
6640 * @param cbToAdd The number of bytes to add (8-bit!).
6641 */
6642DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6643{
6644 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6645 pCtx->rsp += cbToAdd;
6646 else if (pCtx->ss.Attr.n.u1DefBig)
6647 pCtx->esp += cbToAdd;
6648 else
6649 pCtx->sp += cbToAdd;
6650}
6651
6652
6653/**
6654 * Subtracts from the stack pointer.
6655 *
6656 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6657 * @param pCtx The CPU context which SP/ESP/RSP should be
6658 * updated.
6659 * @param cbToSub The number of bytes to subtract (8-bit!).
6660 */
6661DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6662{
6663 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6664 pCtx->rsp -= cbToSub;
6665 else if (pCtx->ss.Attr.n.u1DefBig)
6666 pCtx->esp -= cbToSub;
6667 else
6668 pCtx->sp -= cbToSub;
6669}
6670
6671
6672/**
6673 * Adds to the temporary stack pointer.
6674 *
6675 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6676 * @param pCtx Where to get the current stack mode.
6677 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6678 * @param cbToAdd The number of bytes to add (16-bit).
6679 */
6680DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6681{
6682 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6683 pTmpRsp->u += cbToAdd;
6684 else if (pCtx->ss.Attr.n.u1DefBig)
6685 pTmpRsp->DWords.dw0 += cbToAdd;
6686 else
6687 pTmpRsp->Words.w0 += cbToAdd;
6688}
6689
6690
6691/**
6692 * Subtracts from the temporary stack pointer.
6693 *
6694 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6695 * @param pCtx Where to get the current stack mode.
6696 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6697 * @param cbToSub The number of bytes to subtract.
6698 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6699 * expecting that.
6700 */
6701DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6702{
6703 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6704 pTmpRsp->u -= cbToSub;
6705 else if (pCtx->ss.Attr.n.u1DefBig)
6706 pTmpRsp->DWords.dw0 -= cbToSub;
6707 else
6708 pTmpRsp->Words.w0 -= cbToSub;
6709}
6710
6711
6712/**
6713 * Calculates the effective stack address for a push of the specified size as
6714 * well as the new RSP value (upper bits may be masked).
6715 *
6716 * @returns Effective stack address for the push.
6717 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6718 * @param pCtx Where to get the current stack mode.
6719 * @param cbItem The size of the stack item to push.
6720 * @param puNewRsp Where to return the new RSP value.
6721 */
6722DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6723{
6724 RTUINT64U uTmpRsp;
6725 RTGCPTR GCPtrTop;
6726 uTmpRsp.u = pCtx->rsp;
6727
6728 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6729 GCPtrTop = uTmpRsp.u -= cbItem;
6730 else if (pCtx->ss.Attr.n.u1DefBig)
6731 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6732 else
6733 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6734 *puNewRsp = uTmpRsp.u;
6735 return GCPtrTop;
6736}
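
/*
 * For illustration only (hypothetical values): a 4 byte push on a 16-bit
 * stack (SS.B = 0) only decrements SP; the upper bits of RSP are preserved.
 *
 *      rsp (before) = 0x0000123400000008
 *      GCPtrTop     = 0x0004                  // 0x0008 - 4, 16-bit wrap
 *      *puNewRsp    = 0x0000123400000004      // bits 16..63 untouched
 */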
6737
6738
6739/**
6740 * Gets the current stack pointer and calculates the value after a pop of the
6741 * specified size.
6742 *
6743 * @returns Current stack pointer.
6744 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6745 * @param pCtx Where to get the current stack mode.
6746 * @param cbItem The size of the stack item to pop.
6747 * @param puNewRsp Where to return the new RSP value.
6748 */
6749DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6750{
6751 RTUINT64U uTmpRsp;
6752 RTGCPTR GCPtrTop;
6753 uTmpRsp.u = pCtx->rsp;
6754
6755 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6756 {
6757 GCPtrTop = uTmpRsp.u;
6758 uTmpRsp.u += cbItem;
6759 }
6760 else if (pCtx->ss.Attr.n.u1DefBig)
6761 {
6762 GCPtrTop = uTmpRsp.DWords.dw0;
6763 uTmpRsp.DWords.dw0 += cbItem;
6764 }
6765 else
6766 {
6767 GCPtrTop = uTmpRsp.Words.w0;
6768 uTmpRsp.Words.w0 += cbItem;
6769 }
6770 *puNewRsp = uTmpRsp.u;
6771 return GCPtrTop;
6772}
6773
6774
6775/**
6776 * Calculates the effective stack address for a push of the specified size as
6777 * well as the new temporary RSP value (upper bits may be masked).
6778 *
6779 * @returns Effective stack address for the push.
6780 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6781 * @param pCtx Where to get the current stack mode.
6782 * @param pTmpRsp The temporary stack pointer. This is updated.
6783 * @param cbItem The size of the stack item to push.
6784 */
6785DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6786{
6787 RTGCPTR GCPtrTop;
6788
6789 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6790 GCPtrTop = pTmpRsp->u -= cbItem;
6791 else if (pCtx->ss.Attr.n.u1DefBig)
6792 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6793 else
6794 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6795 return GCPtrTop;
6796}
6797
6798
6799/**
6800 * Gets the effective stack address for a pop of the specified size and
6801 * calculates and updates the temporary RSP.
6802 *
6803 * @returns Current stack pointer.
6804 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6805 * @param pCtx Where to get the current stack mode.
6806 * @param pTmpRsp The temporary stack pointer. This is updated.
6807 * @param cbItem The size of the stack item to pop.
6808 */
6809DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6810{
6811 RTGCPTR GCPtrTop;
6812 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6813 {
6814 GCPtrTop = pTmpRsp->u;
6815 pTmpRsp->u += cbItem;
6816 }
6817 else if (pCtx->ss.Attr.n.u1DefBig)
6818 {
6819 GCPtrTop = pTmpRsp->DWords.dw0;
6820 pTmpRsp->DWords.dw0 += cbItem;
6821 }
6822 else
6823 {
6824 GCPtrTop = pTmpRsp->Words.w0;
6825 pTmpRsp->Words.w0 += cbItem;
6826 }
6827 return GCPtrTop;
6828}
6829
6830/** @} */
6831
6832
6833/** @name FPU access and helpers.
6834 *
6835 * @{
6836 */
6837
6838
6839/**
6840 * Hook for preparing to use the host FPU.
6841 *
6842 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6843 *
6844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6845 */
6846DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6847{
6848#ifdef IN_RING3
6849 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6850#else
6851 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6852#endif
6853}
6854
6855
6856/**
6857 * Hook for preparing to use the host FPU for SSE.
6858 *
6859 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6860 *
6861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6862 */
6863DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6864{
6865 iemFpuPrepareUsage(pVCpu);
6866}
6867
6868
6869/**
6870 * Hook for preparing to use the host FPU for AVX.
6871 *
6872 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6873 *
6874 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6875 */
6876DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6877{
6878 iemFpuPrepareUsage(pVCpu);
6879}
6880
6881
6882/**
6883 * Hook for actualizing the guest FPU state before the interpreter reads it.
6884 *
6885 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6886 *
6887 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6888 */
6889DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6890{
6891#ifdef IN_RING3
6892 NOREF(pVCpu);
6893#else
6894 CPUMRZFpuStateActualizeForRead(pVCpu);
6895#endif
6896}
6897
6898
6899/**
6900 * Hook for actualizing the guest FPU state before the interpreter changes it.
6901 *
6902 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6903 *
6904 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6905 */
6906DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6907{
6908#ifdef IN_RING3
6909 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6910#else
6911 CPUMRZFpuStateActualizeForChange(pVCpu);
6912#endif
6913}
6914
6915
6916/**
6917 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6918 * only.
6919 *
6920 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6921 *
6922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6923 */
6924DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6925{
6926#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6927 NOREF(pVCpu);
6928#else
6929 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6930#endif
6931}
6932
6933
6934/**
6935 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6936 * read+write.
6937 *
6938 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6939 *
6940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6941 */
6942DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6943{
6944#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6945 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6946#else
6947 CPUMRZFpuStateActualizeForChange(pVCpu);
6948#endif
6949}
6950
6951
6952/**
6953 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
6954 * only.
6955 *
6956 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6957 *
6958 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6959 */
6960DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
6961{
6962#ifdef IN_RING3
6963 NOREF(pVCpu);
6964#else
6965 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
6966#endif
6967}
6968
6969
6970/**
6971 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
6972 * read+write.
6973 *
6974 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6975 *
6976 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6977 */
6978DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
6979{
6980#ifdef IN_RING3
6981 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6982#else
6983 CPUMRZFpuStateActualizeForChange(pVCpu);
6984#endif
6985}
6986
6987
6988/**
6989 * Stores a QNaN value into a FPU register.
6990 *
6991 * @param pReg Pointer to the register.
6992 */
6993DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6994{
6995 pReg->au32[0] = UINT32_C(0x00000000);
6996 pReg->au32[1] = UINT32_C(0xc0000000);
6997 pReg->au16[4] = UINT16_C(0xffff);
6998}
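
/*
 * For illustration only: the bytes written above form the 80-bit "real
 * indefinite" QNaN.
 *
 *      au16[4] = 0xffff      -> sign = 1, exponent = 0x7fff
 *      au32[1] = 0xc0000000  -> mantissa bits 63..62 set (integer + quiet bit)
 *      au32[0] = 0x00000000  -> mantissa bits 31..0 are zero
 *      full value: 0xffff c0000000 00000000
 */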
6999
7000
7001/**
7002 * Updates the FOP, FPU.CS and FPUIP registers.
7003 *
7004 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7005 * @param pCtx The CPU context.
7006 * @param pFpuCtx The FPU context.
7007 */
7008DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
7009{
7010 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7011 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7012 /** @todo x87.CS and FPUIP need to be kept separately. */
7013 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7014 {
7015 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7016 * happens in real mode here based on the fnsave and fnstenv images. */
7017 pFpuCtx->CS = 0;
7018 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
7019 }
7020 else
7021 {
7022 pFpuCtx->CS = pCtx->cs.Sel;
7023 pFpuCtx->FPUIP = pCtx->rip;
7024 }
7025}
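
/*
 * For illustration only (hypothetical values): what lands in the x87 image.
 *
 *      real/V86 mode:   cs = 0x1234, ip  = 0x0010
 *                       -> CS = 0, FPUIP = 0x0010 | (0x1234 << 4) = 0x12350
 *      protected mode:  cs = 0x0008, eip = 0x00401000
 *                       -> CS = 0x0008, FPUIP = 0x00401000
 */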
7026
7027
7028/**
7029 * Updates the x87.DS and FPUDP registers.
7030 *
7031 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7032 * @param pCtx The CPU context.
7033 * @param pFpuCtx The FPU context.
7034 * @param iEffSeg The effective segment register.
7035 * @param GCPtrEff The effective address relative to @a iEffSeg.
7036 */
7037DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7038{
7039 RTSEL sel;
7040 switch (iEffSeg)
7041 {
7042 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
7043 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
7044 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
7045 case X86_SREG_ES: sel = pCtx->es.Sel; break;
7046 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
7047 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
7048 default:
7049 AssertMsgFailed(("%d\n", iEffSeg));
7050 sel = pCtx->ds.Sel;
7051 }
7052 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7053 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7054 {
7055 pFpuCtx->DS = 0;
7056 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7057 }
7058 else
7059 {
7060 pFpuCtx->DS = sel;
7061 pFpuCtx->FPUDP = GCPtrEff;
7062 }
7063}
7064
7065
7066/**
7067 * Rotates the stack registers in the push direction.
7068 *
7069 * @param pFpuCtx The FPU context.
7070 * @remarks This is a complete waste of time, but fxsave stores the registers in
7071 * stack order.
7072 */
7073DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7074{
7075 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7076 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7077 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7078 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7079 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7080 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7081 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7082 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7083 pFpuCtx->aRegs[0].r80 = r80Tmp;
7084}
7085
7086
7087/**
7088 * Rotates the stack registers in the pop direction.
7089 *
7090 * @param pFpuCtx The FPU context.
7091 * @remarks This is a complete waste of time, but fxsave stores the registers in
7092 * stack order.
7093 */
7094DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7095{
7096 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7097 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7098 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7099 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7100 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7101 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7102 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7103 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7104 pFpuCtx->aRegs[7].r80 = r80Tmp;
7105}
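
/*
 * For illustration only: aRegs[i] always holds ST(i) (matching the fxsave
 * layout), so a TOP change requires rotating the array, while the FTW
 * bitmap is indexed by physical register number.  With TOP = 6
 * (hypothetical):
 *
 *      ST(0) = aRegs[0].r80, tag bit = FTW & RT_BIT((6 + 0) & 7)   // bit 6
 *      ST(1) = aRegs[1].r80, tag bit = FTW & RT_BIT((6 + 1) & 7)   // bit 7
 */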
7106
7107
7108/**
7109 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7110 * exception prevents it.
7111 *
7112 * @param pResult The FPU operation result to push.
7113 * @param pFpuCtx The FPU context.
7114 */
7115IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7116{
7117 /* Update FSW and bail if there are pending exceptions afterwards. */
7118 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7119 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7120 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7121 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7122 {
7123 pFpuCtx->FSW = fFsw;
7124 return;
7125 }
7126
7127 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7128 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7129 {
7130 /* All is fine, push the actual value. */
7131 pFpuCtx->FTW |= RT_BIT(iNewTop);
7132 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7133 }
7134 else if (pFpuCtx->FCW & X86_FCW_IM)
7135 {
7136 /* Masked stack overflow, push QNaN. */
7137 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7138 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7139 }
7140 else
7141 {
7142 /* Raise stack overflow, don't push anything. */
7143 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7144 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7145 return;
7146 }
7147
7148 fFsw &= ~X86_FSW_TOP_MASK;
7149 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7150 pFpuCtx->FSW = fFsw;
7151
7152 iemFpuRotateStackPush(pFpuCtx);
7153}
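
/*
 * For illustration only: a push decrements TOP modulo 8, computed above as
 * "(TOP + 7) & 7" to keep the arithmetic unsigned.
 *
 *      TOP = 0:  iNewTop = (0 + 7) & 7 = 7
 *      TOP = 3:  iNewTop = (3 + 7) & 7 = 2
 */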
7154
7155
7156/**
7157 * Stores a result in a FPU register and updates the FSW and FTW.
7158 *
7159 * @param pFpuCtx The FPU context.
7160 * @param pResult The result to store.
7161 * @param iStReg Which FPU register to store it in.
7162 */
7163IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7164{
7165 Assert(iStReg < 8);
7166 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7167 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7168 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7169 pFpuCtx->FTW |= RT_BIT(iReg);
7170 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7171}
7172
7173
7174/**
7175 * Only updates the FPU status word (FSW) with the result of the current
7176 * instruction.
7177 *
7178 * @param pFpuCtx The FPU context.
7179 * @param u16FSW The FSW output of the current instruction.
7180 */
7181IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7182{
7183 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7184 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7185}
7186
7187
7188/**
7189 * Pops one item off the FPU stack if no pending exception prevents it.
7190 *
7191 * @param pFpuCtx The FPU context.
7192 */
7193IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7194{
7195 /* Check pending exceptions. */
7196 uint16_t uFSW = pFpuCtx->FSW;
7197 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7198 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7199 return;
7200
7201 /* TOP--. */
7202 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7203 uFSW &= ~X86_FSW_TOP_MASK;
7204 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7205 pFpuCtx->FSW = uFSW;
7206
7207 /* Mark the previous ST0 as empty. */
7208 iOldTop >>= X86_FSW_TOP_SHIFT;
7209 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7210
7211 /* Rotate the registers. */
7212 iemFpuRotateStackPop(pFpuCtx);
7213}
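
/*
 * For illustration only: the pop increments TOP in place inside FSW; adding
 * 9 << X86_FSW_TOP_SHIFT and masking is equivalent to +1 modulo 8.
 *
 *      TOP = 7:  (7 + 9) & 7 = 0      // wraps back to physical register 0
 */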
7214
7215
7216/**
7217 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7218 *
7219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7220 * @param pResult The FPU operation result to push.
7221 */
7222IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7223{
7224 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7225 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7226 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7227 iemFpuMaybePushResult(pResult, pFpuCtx);
7228}
7229
7230
7231/**
7232 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7233 * and sets FPUDP and FPUDS.
7234 *
7235 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7236 * @param pResult The FPU operation result to push.
7237 * @param iEffSeg The effective segment register.
7238 * @param GCPtrEff The effective address relative to @a iEffSeg.
7239 */
7240IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7241{
7242 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7243 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7244 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7245 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7246 iemFpuMaybePushResult(pResult, pFpuCtx);
7247}
7248
7249
7250/**
7251 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7252 * unless a pending exception prevents it.
7253 *
7254 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7255 * @param pResult The FPU operation result to store and push.
7256 */
7257IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7258{
7259 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7260 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7261 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7262
7263 /* Update FSW and bail if there are pending exceptions afterwards. */
7264 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7265 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7266 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7267 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7268 {
7269 pFpuCtx->FSW = fFsw;
7270 return;
7271 }
7272
7273 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7274 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7275 {
7276 /* All is fine, push the actual value. */
7277 pFpuCtx->FTW |= RT_BIT(iNewTop);
7278 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7279 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7280 }
7281 else if (pFpuCtx->FCW & X86_FCW_IM)
7282 {
7283 /* Masked stack overflow, push QNaN. */
7284 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7285 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7286 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7287 }
7288 else
7289 {
7290 /* Raise stack overflow, don't push anything. */
7291 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7292 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7293 return;
7294 }
7295
7296 fFsw &= ~X86_FSW_TOP_MASK;
7297 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7298 pFpuCtx->FSW = fFsw;
7299
7300 iemFpuRotateStackPush(pFpuCtx);
7301}
7302
7303
7304/**
7305 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7306 * FOP.
7307 *
7308 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7309 * @param pResult The result to store.
7310 * @param iStReg Which FPU register to store it in.
7311 */
7312IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7313{
7314 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7315 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7316 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7317 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7318}
7319
7320
7321/**
7322 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7323 * FOP, and then pops the stack.
7324 *
7325 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7326 * @param pResult The result to store.
7327 * @param iStReg Which FPU register to store it in.
7328 */
7329IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7330{
7331 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7332 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7333 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7334 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7335 iemFpuMaybePopOne(pFpuCtx);
7336}
7337
7338
7339/**
7340 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7341 * FPUDP, and FPUDS.
7342 *
7343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7344 * @param pResult The result to store.
7345 * @param iStReg Which FPU register to store it in.
7346 * @param iEffSeg The effective memory operand selector register.
7347 * @param GCPtrEff The effective memory operand offset.
7348 */
7349IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7350 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7351{
7352 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7353 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7354 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7355 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7356 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7357}
7358
7359
7360/**
7361 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7362 * FPUDP, and FPUDS, and then pops the stack.
7363 *
7364 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7365 * @param pResult The result to store.
7366 * @param iStReg Which FPU register to store it in.
7367 * @param iEffSeg The effective memory operand selector register.
7368 * @param GCPtrEff The effective memory operand offset.
7369 */
7370IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7371 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7372{
7373 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7374 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7375 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7376 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7377 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7378 iemFpuMaybePopOne(pFpuCtx);
7379}
7380
7381
7382/**
7383 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7384 *
7385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7386 */
7387IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7388{
7389 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7390 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7391 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7392}
7393
7394
7395/**
7396 * Marks the specified stack register as free (for FFREE).
7397 *
7398 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7399 * @param iStReg The register to free.
7400 */
7401IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7402{
7403 Assert(iStReg < 8);
7404 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7405 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7406 pFpuCtx->FTW &= ~RT_BIT(iReg);
7407}
7408
7409
7410/**
7411 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7412 *
7413 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7414 */
7415IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7416{
7417 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7418 uint16_t uFsw = pFpuCtx->FSW;
7419 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7420 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7421 uFsw &= ~X86_FSW_TOP_MASK;
7422 uFsw |= uTop;
7423 pFpuCtx->FSW = uFsw;
7424}
7425
7426
7427/**
7428 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7429 *
7430 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7431 */
7432IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7433{
7434 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7435 uint16_t uFsw = pFpuCtx->FSW;
7436 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7437 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7438 uFsw &= ~X86_FSW_TOP_MASK;
7439 uFsw |= uTop;
7440 pFpuCtx->FSW = uFsw;
7441}
7442
7443
7444/**
7445 * Updates the FSW, FOP, FPUIP, and FPUCS.
7446 *
7447 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7448 * @param u16FSW The FSW from the current instruction.
7449 */
7450IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7451{
7452 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7453 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7454 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7455 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7456}
7457
7458
7459/**
7460 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7461 *
7462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7463 * @param u16FSW The FSW from the current instruction.
7464 */
7465IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7466{
7467 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7468 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7469 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7470 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7471 iemFpuMaybePopOne(pFpuCtx);
7472}
7473
7474
7475/**
7476 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7477 *
7478 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7479 * @param u16FSW The FSW from the current instruction.
7480 * @param iEffSeg The effective memory operand selector register.
7481 * @param GCPtrEff The effective memory operand offset.
7482 */
7483IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7484{
7485 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7486 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7487 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7488 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7489 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7490}
7491
7492
7493/**
7494 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7495 *
7496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7497 * @param u16FSW The FSW from the current instruction.
7498 */
7499IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7500{
7501 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7502 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7503 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7504 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7505 iemFpuMaybePopOne(pFpuCtx);
7506 iemFpuMaybePopOne(pFpuCtx);
7507}
7508
7509
7510/**
7511 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7512 *
7513 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7514 * @param u16FSW The FSW from the current instruction.
7515 * @param iEffSeg The effective memory operand selector register.
7516 * @param GCPtrEff The effective memory operand offset.
7517 */
7518IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7519{
7520 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7521 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7522 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7523 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7524 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7525 iemFpuMaybePopOne(pFpuCtx);
7526}
7527
7528
7529/**
7530 * Worker routine for raising an FPU stack underflow exception.
7531 *
7532 * @param pFpuCtx The FPU context.
7533 * @param iStReg The stack register being accessed.
7534 */
7535IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7536{
7537 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7538 if (pFpuCtx->FCW & X86_FCW_IM)
7539 {
7540 /* Masked underflow. */
7541 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7542 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7543 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7544 if (iStReg != UINT8_MAX)
7545 {
7546 pFpuCtx->FTW |= RT_BIT(iReg);
7547 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7548 }
7549 }
7550 else
7551 {
7552 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7553 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7554 }
7555}
7556
7557
7558/**
7559 * Raises a FPU stack underflow exception.
7560 *
7561 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7562 * @param iStReg The destination register that should be loaded
7563 * with QNaN if \#IS is not masked. Specify
7564 * UINT8_MAX if none (like for fcom).
7565 */
7566DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7567{
7568 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7569 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7570 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7571 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7572}
7573
7574
7575DECL_NO_INLINE(IEM_STATIC, void)
7576iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7577{
7578 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7579 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7580 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7581 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7582 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7583}
7584
7585
7586DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7587{
7588 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7589 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7590 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7591 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7592 iemFpuMaybePopOne(pFpuCtx);
7593}
7594
7595
7596DECL_NO_INLINE(IEM_STATIC, void)
7597iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7598{
7599 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7600 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7601 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7602 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7603 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7604 iemFpuMaybePopOne(pFpuCtx);
7605}
7606
7607
7608DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7609{
7610 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7611 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7612 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7613 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7614 iemFpuMaybePopOne(pFpuCtx);
7615 iemFpuMaybePopOne(pFpuCtx);
7616}
7617
7618
7619DECL_NO_INLINE(IEM_STATIC, void)
7620iemFpuStackPushUnderflow(PVMCPU pVCpu)
7621{
7622 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7623 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7624 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7625
7626 if (pFpuCtx->FCW & X86_FCW_IM)
7627 {
7628 /* Masked underflow - Push QNaN. */
7629 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7630 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7631 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7632 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7633 pFpuCtx->FTW |= RT_BIT(iNewTop);
7634 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7635 iemFpuRotateStackPush(pFpuCtx);
7636 }
7637 else
7638 {
7639 /* Exception pending - don't change TOP or the register stack. */
7640 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7641 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7642 }
7643}
7644
7645
7646DECL_NO_INLINE(IEM_STATIC, void)
7647iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7648{
7649 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7650 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7651 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7652
7653 if (pFpuCtx->FCW & X86_FCW_IM)
7654 {
7655 /* Masked underflow - Push QNaN. */
7656 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7657 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7658 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7659 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7660 pFpuCtx->FTW |= RT_BIT(iNewTop);
7661 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7662 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7663 iemFpuRotateStackPush(pFpuCtx);
7664 }
7665 else
7666 {
7667 /* Exception pending - don't change TOP or the register stack. */
7668 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7669 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7670 }
7671}
7672
7673
7674/**
7675 * Worker routine for raising an FPU stack overflow exception on a push.
7676 *
7677 * @param pFpuCtx The FPU context.
7678 */
7679IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7680{
7681 if (pFpuCtx->FCW & X86_FCW_IM)
7682 {
7683 /* Masked overflow. */
7684 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7685 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7686 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7687 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7688 pFpuCtx->FTW |= RT_BIT(iNewTop);
7689 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7690 iemFpuRotateStackPush(pFpuCtx);
7691 }
7692 else
7693 {
7694 /* Exception pending - don't change TOP or the register stack. */
7695 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7696 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7697 }
7698}
7699
7700
7701/**
7702 * Raises a FPU stack overflow exception on a push.
7703 *
7704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7705 */
7706DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7707{
7708 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7709 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7710 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7711 iemFpuStackPushOverflowOnly(pFpuCtx);
7712}
7713
7714
7715/**
7716 * Raises a FPU stack overflow exception on a push with a memory operand.
7717 *
7718 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7719 * @param iEffSeg The effective memory operand selector register.
7720 * @param GCPtrEff The effective memory operand offset.
7721 */
7722DECL_NO_INLINE(IEM_STATIC, void)
7723iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7724{
7725 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7726 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7727 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7728 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7729 iemFpuStackPushOverflowOnly(pFpuCtx);
7730}
7731
7732
7733IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7734{
7735 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7736 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7737 if (pFpuCtx->FTW & RT_BIT(iReg))
7738 return VINF_SUCCESS;
7739 return VERR_NOT_FOUND;
7740}
7741
7742
7743IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7744{
7745 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7746 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7747 if (pFpuCtx->FTW & RT_BIT(iReg))
7748 {
7749 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7750 return VINF_SUCCESS;
7751 }
7752 return VERR_NOT_FOUND;
7753}
7754
7755
7756IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7757 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7758{
7759 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7760 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7761 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7762 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7763 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7764 {
7765 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7766 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7767 return VINF_SUCCESS;
7768 }
7769 return VERR_NOT_FOUND;
7770}
7771
7772
7773IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7774{
7775 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7776 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7777 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7778 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7779 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7780 {
7781 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7782 return VINF_SUCCESS;
7783 }
7784 return VERR_NOT_FOUND;
7785}
7786
7787
7788/**
7789 * Updates the FPU exception status after FCW is changed.
7790 *
7791 * @param pFpuCtx The FPU context.
7792 */
7793IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7794{
7795 uint16_t u16Fsw = pFpuCtx->FSW;
7796 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7797 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7798 else
7799 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7800 pFpuCtx->FSW = u16Fsw;
7801}
7802
7803
7804/**
7805 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7806 *
7807 * @returns The full FTW.
7808 * @param pFpuCtx The FPU context.
7809 */
7810IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7811{
7812 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7813 uint16_t u16Ftw = 0;
7814 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7815 for (unsigned iSt = 0; iSt < 8; iSt++)
7816 {
7817 unsigned const iReg = (iSt + iTop) & 7;
7818 if (!(u8Ftw & RT_BIT(iReg)))
7819 u16Ftw |= 3 << (iReg * 2); /* empty */
7820 else
7821 {
7822 uint16_t uTag;
7823 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7824 if (pr80Reg->s.uExponent == 0x7fff)
7825 uTag = 2; /* Exponent is all 1's => Special. */
7826 else if (pr80Reg->s.uExponent == 0x0000)
7827 {
7828 if (pr80Reg->s.u64Mantissa == 0x0000)
7829 uTag = 1; /* All bits are zero => Zero. */
7830 else
7831 uTag = 2; /* Must be special. */
7832 }
7833 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7834 uTag = 0; /* Valid. */
7835 else
7836 uTag = 2; /* Must be special. */
7837
7838 u16Ftw |= uTag << (iReg * 2);
7839 }
7840 }
7841
7842 return u16Ftw;
7843}
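
/*
 * For illustration only: the 2-bit tag values produced above are
 *
 *      00 valid, 01 zero, 10 special, 11 empty
 *
 * e.g. with TOP = 7 and only ST(0) in use and valid (u8Ftw = 0x80), all
 * other slots are tagged empty and the full FTW comes out as 0x3fff.
 */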
7844
7845
7846/**
7847 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7848 *
7849 * @returns The compressed FTW.
7850 * @param u16FullFtw The full FTW to convert.
7851 */
7852IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7853{
7854 uint8_t u8Ftw = 0;
7855 for (unsigned i = 0; i < 8; i++)
7856 {
7857 if ((u16FullFtw & 3) != 3 /*empty*/)
7858 u8Ftw |= RT_BIT(i);
7859 u16FullFtw >>= 2;
7860 }
7861
7862 return u8Ftw;
7863}
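
/*
 * For illustration only: compression keeps one "not empty" bit per register.
 *
 *      u16FullFtw = 0x3fff (regs 0..6 empty, reg 7 valid)  ->  u8Ftw = 0x80
 */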
7864
7865/** @} */
7866
7867
7868/** @name Memory access.
7869 *
7870 * @{
7871 */
7872
7873
7874/**
7875 * Updates the IEMCPU::cbWritten counter if applicable.
7876 *
7877 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7878 * @param fAccess The access being accounted for.
7879 * @param cbMem The access size.
7880 */
7881DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7882{
7883 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7884 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7885 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7886}
7887
7888
7889/**
7890 * Checks if the given segment can be written to, raising the appropriate
7891 * exception if not.
7892 *
7893 * @returns VBox strict status code.
7894 *
7895 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7896 * @param pHid Pointer to the hidden register.
7897 * @param iSegReg The register number.
7898 * @param pu64BaseAddr Where to return the base address to use for the
7899 * segment. (In 64-bit code it may differ from the
7900 * base in the hidden segment.)
7901 */
7902IEM_STATIC VBOXSTRICTRC
7903iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7904{
7905 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7906 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7907 else
7908 {
7909 if (!pHid->Attr.n.u1Present)
7910 {
7911 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7912 AssertRelease(uSel == 0);
7913 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7914 return iemRaiseGeneralProtectionFault0(pVCpu);
7915 }
7916
7917 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7918 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7919 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7920 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7921 *pu64BaseAddr = pHid->u64Base;
7922 }
7923 return VINF_SUCCESS;
7924}
7925
7926
7927/**
7928 * Checks if the given segment can be read from, raising the appropriate
7929 * exception if not.
7930 *
7931 * @returns VBox strict status code.
7932 *
7933 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7934 * @param pHid Pointer to the hidden register.
7935 * @param iSegReg The register number.
7936 * @param pu64BaseAddr Where to return the base address to use for the
7937 * segment. (In 64-bit code it may differ from the
7938 * base in the hidden segment.)
7939 */
7940IEM_STATIC VBOXSTRICTRC
7941iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7942{
7943 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7944 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7945 else
7946 {
7947 if (!pHid->Attr.n.u1Present)
7948 {
7949 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7950 AssertRelease(uSel == 0);
7951 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7952 return iemRaiseGeneralProtectionFault0(pVCpu);
7953 }
7954
7955 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7956 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7957 *pu64BaseAddr = pHid->u64Base;
7958 }
7959 return VINF_SUCCESS;
7960}
7961
7962
7963/**
7964 * Applies the segment limit, base and attributes.
7965 *
7966 * This may raise a \#GP or \#SS.
7967 *
7968 * @returns VBox strict status code.
7969 *
7970 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7971 * @param fAccess The kind of access which is being performed.
7972 * @param iSegReg The index of the segment register to apply.
7973 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7974 * TSS, ++).
7975 * @param cbMem The access size.
7976 * @param pGCPtrMem Pointer to the guest memory address to apply
7977 * segmentation to. Input and output parameter.
7978 */
7979IEM_STATIC VBOXSTRICTRC
7980iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7981{
7982 if (iSegReg == UINT8_MAX)
7983 return VINF_SUCCESS;
7984
7985 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7986 switch (pVCpu->iem.s.enmCpuMode)
7987 {
7988 case IEMMODE_16BIT:
7989 case IEMMODE_32BIT:
7990 {
7991 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7992 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7993
7994 if ( pSel->Attr.n.u1Present
7995 && !pSel->Attr.n.u1Unusable)
7996 {
7997 Assert(pSel->Attr.n.u1DescType);
7998 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7999 {
8000 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8001 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8002 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8003
8004 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8005 {
8006 /** @todo CPL check. */
8007 }
8008
8009 /*
8010 * There are two kinds of data selectors, normal and expand down.
8011 */
8012 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8013 {
8014 if ( GCPtrFirst32 > pSel->u32Limit
8015 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8016 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8017 }
8018 else
8019 {
8020 /*
8021 * The upper boundary is defined by the B bit, not the G bit!
8022 */
8023 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8024 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8025 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8026 }
8027 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8028 }
8029 else
8030 {
8031
8032 /*
8033 * Code selectors can usually be read through; writing is
8034 * only permitted in real and V8086 mode.
8035 */
8036 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8037 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8038 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8039 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8040 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8041
8042 if ( GCPtrFirst32 > pSel->u32Limit
8043 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8044 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8045
8046 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8047 {
8048 /** @todo CPL check. */
8049 }
8050
8051 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8052 }
8053 }
8054 else
8055 return iemRaiseGeneralProtectionFault0(pVCpu);
8056 return VINF_SUCCESS;
8057 }
8058
8059 case IEMMODE_64BIT:
8060 {
8061 RTGCPTR GCPtrMem = *pGCPtrMem;
8062 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8063 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8064
8065 Assert(cbMem >= 1);
8066 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8067 return VINF_SUCCESS;
8068 return iemRaiseGeneralProtectionFault0(pVCpu);
8069 }
8070
8071 default:
8072 AssertFailedReturn(VERR_IEM_IPE_7);
8073 }
8074}
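/* Illustrative note (not part of the original source): for an expand-down data
 * segment the valid offsets lie *above* the limit.  E.g. with u32Limit=0x0fff
 * and the B bit set, offsets 0x1000..0xffffffff are valid; a 4 byte access at
 * GCPtrMem=0x0800 fails the GCPtrFirst32 < u32Limit + 1 test above and raises
 * \#GP/\#SS via iemRaiseSelectorBounds, while the same access at 0x2000 is
 * accepted and simply gets the segment base added. */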
8075
8076
8077/**
8078 * Translates a virtual address to a physical address and checks if we
8079 * can access the page as specified.
8080 *
8081 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8082 * @param GCPtrMem The virtual address.
8083 * @param fAccess The intended access.
8084 * @param pGCPhysMem Where to return the physical address.
8085 */
8086IEM_STATIC VBOXSTRICTRC
8087iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8088{
8089 /** @todo Need a different PGM interface here. We're currently using
8090 * generic / REM interfaces. this won't cut it for R0 & RC. */
8091 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8092 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8093 RTGCPHYS GCPhys;
8094 uint64_t fFlags;
8095 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8096 if (RT_FAILURE(rc))
8097 {
8098 /** @todo Check unassigned memory in unpaged mode. */
8099 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8100 *pGCPhysMem = NIL_RTGCPHYS;
8101 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8102 }
8103
8104 /* If the page is writable, user-accessible and does not have the no-exec bit
8105 set, all access is allowed. Otherwise we'll have to check more carefully... */
8106 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8107 {
8108 /* Write to read only memory? */
8109 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8110 && !(fFlags & X86_PTE_RW)
8111 && ( (pVCpu->iem.s.uCpl == 3
8112 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8113 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
8114 {
8115 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8116 *pGCPhysMem = NIL_RTGCPHYS;
8117 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8118 }
8119
8120 /* Kernel memory accessed by userland? */
8121 if ( !(fFlags & X86_PTE_US)
8122 && pVCpu->iem.s.uCpl == 3
8123 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8124 {
8125 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8126 *pGCPhysMem = NIL_RTGCPHYS;
8127 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8128 }
8129
8130 /* Executing non-executable memory? */
8131 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8132 && (fFlags & X86_PTE_PAE_NX)
8133 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
8134 {
8135 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8136 *pGCPhysMem = NIL_RTGCPHYS;
8137 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8138 VERR_ACCESS_DENIED);
8139 }
8140 }
8141
8142 /*
8143 * Set the dirty / access flags.
8144 * ASSUMES this is set when the address is translated rather than on commit...
8145 */
8146 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8147 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8148 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8149 {
8150 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8151 AssertRC(rc2);
8152 }
8153
8154 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8155 *pGCPhysMem = GCPhys;
8156 return VINF_SUCCESS;
8157}
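/* Illustrative sketch (not part of the original source): a caller typically
 * lets this helper do all the paging-level checks and only looks at the
 * status code; the variable names below are made up.
 *
 *     RTGCPHYS GCPhys;
 *     VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem,
 *                                                               IEM_ACCESS_DATA_W, &GCPhys);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict; // #PF already raised (read-only + CR0.WP, U/S, NX, ...)
 *
 * Note that a CPL 0 write to a read-only page only faults when CR0.WP is set,
 * whereas a CPL 3 write to such a page always faults (see the checks above). */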
8158
8159
8160
8161/**
8162 * Maps a physical page.
8163 *
8164 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8165 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8166 * @param GCPhysMem The physical address.
8167 * @param fAccess The intended access.
8168 * @param ppvMem Where to return the mapping address.
8169 * @param pLock The PGM lock.
8170 */
8171IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8172{
8173#ifdef IEM_VERIFICATION_MODE_FULL
8174 /* Force the alternative path so we can ignore writes. */
8175 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
8176 {
8177 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8178 {
8179 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
8180 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8181 if (RT_FAILURE(rc2))
8182 pVCpu->iem.s.fProblematicMemory = true;
8183 }
8184 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8185 }
8186#endif
8187#ifdef IEM_LOG_MEMORY_WRITES
8188 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8189 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8190#endif
8191#ifdef IEM_VERIFICATION_MODE_MINIMAL
8192 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8193#endif
8194
8195 /** @todo This API may require some improvement later. A private deal with PGM
8196 * regarding locking and unlocking needs to be struck. A couple of TLBs
8197 * living in PGM, but with publicly accessible inlined access methods,
8198 * could perhaps be an even better solution. */
8199 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8200 GCPhysMem,
8201 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8202 pVCpu->iem.s.fBypassHandlers,
8203 ppvMem,
8204 pLock);
8205 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8206 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8207
8208#ifdef IEM_VERIFICATION_MODE_FULL
8209 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8210 pVCpu->iem.s.fProblematicMemory = true;
8211#endif
8212 return rc;
8213}
8214
8215
8216/**
8217 * Unmap a page previously mapped by iemMemPageMap.
8218 *
8219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8220 * @param GCPhysMem The physical address.
8221 * @param fAccess The intended access.
8222 * @param pvMem What iemMemPageMap returned.
8223 * @param pLock The PGM lock.
8224 */
8225DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8226{
8227 NOREF(pVCpu);
8228 NOREF(GCPhysMem);
8229 NOREF(fAccess);
8230 NOREF(pvMem);
8231 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8232}
8233
8234
8235/**
8236 * Looks up a memory mapping entry.
8237 *
8238 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8239 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8240 * @param pvMem The memory address.
8241 * @param pvMem The memory address.
8242 * @param fAccess The access type and what-flags to match.
8242 */
8243DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8244{
8245 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8246 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8247 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8248 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8249 return 0;
8250 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8251 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8252 return 1;
8253 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8254 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8255 return 2;
8256 return VERR_NOT_FOUND;
8257}
8258
8259
8260/**
8261 * Finds a free memmap entry when iNextMapping doesn't point at a free one.
8262 *
8263 * @returns Memory mapping index, 1024 on failure.
8264 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8265 */
8266IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8267{
8268 /*
8269 * The easy case.
8270 */
8271 if (pVCpu->iem.s.cActiveMappings == 0)
8272 {
8273 pVCpu->iem.s.iNextMapping = 1;
8274 return 0;
8275 }
8276
8277 /* There should be enough mappings for all instructions. */
8278 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8279
8280 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8281 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8282 return i;
8283
8284 AssertFailedReturn(1024);
8285}
8286
8287
8288/**
8289 * Commits a bounce buffer that needs writing back and unmaps it.
8290 *
8291 * @returns Strict VBox status code.
8292 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8293 * @param iMemMap The index of the buffer to commit.
8294 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8295 * Always false in ring-3, obviously.
8296 */
8297IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8298{
8299 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8300 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8301#ifdef IN_RING3
8302 Assert(!fPostponeFail);
8303 RT_NOREF_PV(fPostponeFail);
8304#endif
8305
8306 /*
8307 * Do the writing.
8308 */
8309#ifndef IEM_VERIFICATION_MODE_MINIMAL
8310 PVM pVM = pVCpu->CTX_SUFF(pVM);
8311 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
8312 && !IEM_VERIFICATION_ENABLED(pVCpu))
8313 {
8314 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8315 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8316 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8317 if (!pVCpu->iem.s.fBypassHandlers)
8318 {
8319 /*
8320 * Carefully and efficiently dealing with access handler return
8321 * codes makes this a little bloated.
8322 */
8323 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8324 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8325 pbBuf,
8326 cbFirst,
8327 PGMACCESSORIGIN_IEM);
8328 if (rcStrict == VINF_SUCCESS)
8329 {
8330 if (cbSecond)
8331 {
8332 rcStrict = PGMPhysWrite(pVM,
8333 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8334 pbBuf + cbFirst,
8335 cbSecond,
8336 PGMACCESSORIGIN_IEM);
8337 if (rcStrict == VINF_SUCCESS)
8338 { /* nothing */ }
8339 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8340 {
8341 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8342 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8343 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8344 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8345 }
8346# ifndef IN_RING3
8347 else if (fPostponeFail)
8348 {
8349 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8350 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8351 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8352 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8353 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8354 return iemSetPassUpStatus(pVCpu, rcStrict);
8355 }
8356# endif
8357 else
8358 {
8359 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8360 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8361 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8362 return rcStrict;
8363 }
8364 }
8365 }
8366 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8367 {
8368 if (!cbSecond)
8369 {
8370 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8371 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8372 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8373 }
8374 else
8375 {
8376 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8377 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8378 pbBuf + cbFirst,
8379 cbSecond,
8380 PGMACCESSORIGIN_IEM);
8381 if (rcStrict2 == VINF_SUCCESS)
8382 {
8383 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8384 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8385 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8386 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8387 }
8388 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8389 {
8390 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8391 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8392 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8393 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8394 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8395 }
8396# ifndef IN_RING3
8397 else if (fPostponeFail)
8398 {
8399 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8400 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8401 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8402 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8403 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8404 return iemSetPassUpStatus(pVCpu, rcStrict);
8405 }
8406# endif
8407 else
8408 {
8409 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8410 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8411 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8412 return rcStrict2;
8413 }
8414 }
8415 }
8416# ifndef IN_RING3
8417 else if (fPostponeFail)
8418 {
8419 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8420 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8421 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8422 if (!cbSecond)
8423 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8424 else
8425 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8426 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8427 return iemSetPassUpStatus(pVCpu, rcStrict);
8428 }
8429# endif
8430 else
8431 {
8432 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8433 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8434 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8435 return rcStrict;
8436 }
8437 }
8438 else
8439 {
8440 /*
8441 * No access handlers, much simpler.
8442 */
8443 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8444 if (RT_SUCCESS(rc))
8445 {
8446 if (cbSecond)
8447 {
8448 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8449 if (RT_SUCCESS(rc))
8450 { /* likely */ }
8451 else
8452 {
8453 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8454 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8455 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8456 return rc;
8457 }
8458 }
8459 }
8460 else
8461 {
8462 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8463 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8464 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8465 return rc;
8466 }
8467 }
8468 }
8469#endif
8470
8471#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8472 /*
8473 * Record the write(s).
8474 */
8475 if (!pVCpu->iem.s.fNoRem)
8476 {
8477 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8478 if (pEvtRec)
8479 {
8480 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8481 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
8482 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8483 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
8484 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
8485 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8486 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8487 }
8488 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8489 {
8490 pEvtRec = iemVerifyAllocRecord(pVCpu);
8491 if (pEvtRec)
8492 {
8493 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8494 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
8495 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8496 memcpy(pEvtRec->u.RamWrite.ab,
8497 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
8498 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
8499 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8500 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8501 }
8502 }
8503 }
8504#endif
8505#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
8506 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8507 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8508 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8509 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8510 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8511 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8512
8513 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8514 g_cbIemWrote = cbWrote;
8515 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8516#endif
8517
8518 /*
8519 * Free the mapping entry.
8520 */
8521 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8522 Assert(pVCpu->iem.s.cActiveMappings != 0);
8523 pVCpu->iem.s.cActiveMappings--;
8524 return VINF_SUCCESS;
8525}
8526
8527
8528/**
8529 * iemMemMap worker that deals with a request crossing pages.
8530 */
8531IEM_STATIC VBOXSTRICTRC
8532iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8533{
8534 /*
8535 * Do the address translations.
8536 */
8537 RTGCPHYS GCPhysFirst;
8538 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8539 if (rcStrict != VINF_SUCCESS)
8540 return rcStrict;
8541
8542 RTGCPHYS GCPhysSecond;
8543 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8544 fAccess, &GCPhysSecond);
8545 if (rcStrict != VINF_SUCCESS)
8546 return rcStrict;
8547 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8548
8549 PVM pVM = pVCpu->CTX_SUFF(pVM);
8550#ifdef IEM_VERIFICATION_MODE_FULL
8551 /*
8552 * Detect problematic memory when verifying so we can select
8553 * the right execution engine. (TLB: Redo this.)
8554 */
8555 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8556 {
8557 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8558 if (RT_SUCCESS(rc2))
8559 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8560 if (RT_FAILURE(rc2))
8561 pVCpu->iem.s.fProblematicMemory = true;
8562 }
8563#endif
8564
8565
8566 /*
8567 * Read in the current memory content if it's a read, execute or partial
8568 * write access.
8569 */
8570 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8571 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8572 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8573
8574 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8575 {
8576 if (!pVCpu->iem.s.fBypassHandlers)
8577 {
8578 /*
8579 * Must carefully deal with access handler status codes here,
8580 * which makes the code a bit bloated.
8581 */
8582 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8583 if (rcStrict == VINF_SUCCESS)
8584 {
8585 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8586 if (rcStrict == VINF_SUCCESS)
8587 { /*likely */ }
8588 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8589 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8590 else
8591 {
8592 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8593 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8594 return rcStrict;
8595 }
8596 }
8597 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8598 {
8599 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8600 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8601 {
8602 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8603 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8604 }
8605 else
8606 {
8607 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8608 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8609 return rcStrict2;
8610 }
8611 }
8612 else
8613 {
8614 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8615 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8616 return rcStrict;
8617 }
8618 }
8619 else
8620 {
8621 /*
8622 * No informational status codes here, much more straightforward.
8623 */
8624 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8625 if (RT_SUCCESS(rc))
8626 {
8627 Assert(rc == VINF_SUCCESS);
8628 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8629 if (RT_SUCCESS(rc))
8630 Assert(rc == VINF_SUCCESS);
8631 else
8632 {
8633 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8634 return rc;
8635 }
8636 }
8637 else
8638 {
8639 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8640 return rc;
8641 }
8642 }
8643
8644#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8645 if ( !pVCpu->iem.s.fNoRem
8646 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8647 {
8648 /*
8649 * Record the reads.
8650 */
8651 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8652 if (pEvtRec)
8653 {
8654 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8655 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8656 pEvtRec->u.RamRead.cb = cbFirstPage;
8657 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8658 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8659 }
8660 pEvtRec = iemVerifyAllocRecord(pVCpu);
8661 if (pEvtRec)
8662 {
8663 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8664 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8665 pEvtRec->u.RamRead.cb = cbSecondPage;
8666 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8667 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8668 }
8669 }
8670#endif
8671 }
8672#ifdef VBOX_STRICT
8673 else
8674 memset(pbBuf, 0xcc, cbMem);
8675 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8676 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8677#endif
8678
8679 /*
8680 * Commit the bounce buffer entry.
8681 */
8682 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8683 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8684 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8685 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8686 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8687 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8688 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8689 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8690 pVCpu->iem.s.cActiveMappings++;
8691
8692 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8693 *ppvMem = pbBuf;
8694 return VINF_SUCCESS;
8695}
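/* Worked example (illustrative note, not part of the original source): an
 * 8 byte access at a guest address with page offset 0xffa is split into
 * cbFirstPage = PAGE_SIZE - 0xffa = 6 bytes from the end of the first page and
 * cbSecondPage = 8 - 6 = 2 bytes from the start of the second page; both halves
 * are gathered into (or scattered from) one bounce buffer so the instruction
 * sees a single contiguous mapping. */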
8696
8697
8698/**
8699 * iemMemMap worker that deals with iemMemPageMap failures.
8700 */
8701IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8702 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8703{
8704 /*
8705 * Filter out conditions we can handle and the ones which shouldn't happen.
8706 */
8707 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8708 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8709 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8710 {
8711 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8712 return rcMap;
8713 }
8714 pVCpu->iem.s.cPotentialExits++;
8715
8716 /*
8717 * Read in the current memory content if it's a read, execute or partial
8718 * write access.
8719 */
8720 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8721 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8722 {
8723 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8724 memset(pbBuf, 0xff, cbMem);
8725 else
8726 {
8727 int rc;
8728 if (!pVCpu->iem.s.fBypassHandlers)
8729 {
8730 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8731 if (rcStrict == VINF_SUCCESS)
8732 { /* nothing */ }
8733 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8734 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8735 else
8736 {
8737 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8738 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8739 return rcStrict;
8740 }
8741 }
8742 else
8743 {
8744 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8745 if (RT_SUCCESS(rc))
8746 { /* likely */ }
8747 else
8748 {
8749 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8750 GCPhysFirst, rc));
8751 return rc;
8752 }
8753 }
8754 }
8755
8756#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8757 if ( !pVCpu->iem.s.fNoRem
8758 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8759 {
8760 /*
8761 * Record the read.
8762 */
8763 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8764 if (pEvtRec)
8765 {
8766 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8767 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8768 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8769 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8770 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8771 }
8772 }
8773#endif
8774 }
8775#ifdef VBOX_STRICT
8776 else
8777 memset(pbBuf, 0xcc, cbMem);
8778#endif
8779#ifdef VBOX_STRICT
8780 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8781 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8782#endif
8783
8784 /*
8785 * Commit the bounce buffer entry.
8786 */
8787 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8788 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8789 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8790 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8791 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8792 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8793 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8794 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8795 pVCpu->iem.s.cActiveMappings++;
8796
8797 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8798 *ppvMem = pbBuf;
8799 return VINF_SUCCESS;
8800}
8801
8802
8803
8804/**
8805 * Maps the specified guest memory for the given kind of access.
8806 *
8807 * This may be using bounce buffering of the memory if it's crossing a page
8808 * boundary or if there is an access handler installed for any of it. Because
8809 * of lock prefix guarantees, we're in for some extra clutter when this
8810 * happens.
8811 *
8812 * This may raise a \#GP, \#SS, \#PF or \#AC.
8813 *
8814 * @returns VBox strict status code.
8815 *
8816 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8817 * @param ppvMem Where to return the pointer to the mapped
8818 * memory.
8819 * @param cbMem The number of bytes to map. This is usually 1,
8820 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8821 * string operations it can be up to a page.
8822 * @param iSegReg The index of the segment register to use for
8823 * this access. The base and limits are checked.
8824 * Use UINT8_MAX to indicate that no segmentation
8825 * is required (for IDT, GDT and LDT accesses).
8826 * @param GCPtrMem The address of the guest memory.
8827 * @param fAccess How the memory is being accessed. The
8828 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8829 * how to map the memory, while the
8830 * IEM_ACCESS_WHAT_XXX bit is used when raising
8831 * exceptions.
8832 */
8833IEM_STATIC VBOXSTRICTRC
8834iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8835{
8836 /*
8837 * Check the input and figure out which mapping entry to use.
8838 */
8839 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8840 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8841 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8842
8843 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8844 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8845 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8846 {
8847 iMemMap = iemMemMapFindFree(pVCpu);
8848 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8849 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8850 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8851 pVCpu->iem.s.aMemMappings[2].fAccess),
8852 VERR_IEM_IPE_9);
8853 }
8854
8855 /*
8856 * Map the memory, checking that we can actually access it. If something
8857 * slightly complicated happens, fall back on bounce buffering.
8858 */
8859 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8860 if (rcStrict != VINF_SUCCESS)
8861 return rcStrict;
8862
8863 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8864 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8865
8866 RTGCPHYS GCPhysFirst;
8867 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8868 if (rcStrict != VINF_SUCCESS)
8869 return rcStrict;
8870
8871 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8872 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8873 if (fAccess & IEM_ACCESS_TYPE_READ)
8874 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8875
8876 void *pvMem;
8877 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8878 if (rcStrict != VINF_SUCCESS)
8879 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8880
8881 /*
8882 * Fill in the mapping table entry.
8883 */
8884 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8885 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8886 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8887 pVCpu->iem.s.cActiveMappings++;
8888
8889 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8890 *ppvMem = pvMem;
8891 return VINF_SUCCESS;
8892}
8893
8894
8895/**
8896 * Commits the guest memory if bounce buffered and unmaps it.
8897 *
8898 * @returns Strict VBox status code.
8899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8900 * @param pvMem The mapping.
8901 * @param fAccess The kind of access.
8902 */
8903IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8904{
8905 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8906 AssertReturn(iMemMap >= 0, iMemMap);
8907
8908 /* If it's bounce buffered, we may need to write back the buffer. */
8909 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8910 {
8911 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8912 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8913 }
8914 /* Otherwise unlock it. */
8915 else
8916 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8917
8918 /* Free the entry. */
8919 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8920 Assert(pVCpu->iem.s.cActiveMappings != 0);
8921 pVCpu->iem.s.cActiveMappings--;
8922 return VINF_SUCCESS;
8923}
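/* Illustrative sketch (not part of the original source): the typical map /
 * modify / commit pattern a store helper would follow, pairing iemMemMap with
 * iemMemCommitAndUnmap; the helper shape and variable names here are made up,
 * but mirror the fetch helpers further down.
 *
 *     uint16_t *pu16Dst;
 *     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst),
 *                                 iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
 *     if (rc == VINF_SUCCESS)
 *     {
 *         *pu16Dst = u16Value;
 *         rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
 *     }
 *     return rc;
 */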
8924
8925#ifdef IEM_WITH_SETJMP
8926
8927/**
8928 * Maps the specified guest memory for the given kind of access, longjmp on
8929 * error.
8930 *
8931 * This may be using bounce buffering of the memory if it's crossing a page
8932 * boundary or if there is an access handler installed for any of it. Because
8933 * of lock prefix guarantees, we're in for some extra clutter when this
8934 * happens.
8935 *
8936 * This may raise a \#GP, \#SS, \#PF or \#AC.
8937 *
8938 * @returns Pointer to the mapped memory.
8939 *
8940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8941 * @param cbMem The number of bytes to map. This is usually 1,
8942 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8943 * string operations it can be up to a page.
8944 * @param iSegReg The index of the segment register to use for
8945 * this access. The base and limits are checked.
8946 * Use UINT8_MAX to indicate that no segmentation
8947 * is required (for IDT, GDT and LDT accesses).
8948 * @param GCPtrMem The address of the guest memory.
8949 * @param fAccess How the memory is being accessed. The
8950 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8951 * how to map the memory, while the
8952 * IEM_ACCESS_WHAT_XXX bit is used when raising
8953 * exceptions.
8954 */
8955IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8956{
8957 /*
8958 * Check the input and figure out which mapping entry to use.
8959 */
8960 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8961 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8962 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8963
8964 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8965 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8966 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8967 {
8968 iMemMap = iemMemMapFindFree(pVCpu);
8969 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8970 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8971 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8972 pVCpu->iem.s.aMemMappings[2].fAccess),
8973 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8974 }
8975
8976 /*
8977 * Map the memory, checking that we can actually access it. If something
8978 * slightly complicated happens, fall back on bounce buffering.
8979 */
8980 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8981 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8982 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8983
8984 /* Crossing a page boundary? */
8985 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8986 { /* No (likely). */ }
8987 else
8988 {
8989 void *pvMem;
8990 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8991 if (rcStrict == VINF_SUCCESS)
8992 return pvMem;
8993 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8994 }
8995
8996 RTGCPHYS GCPhysFirst;
8997 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8998 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8999 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9000
9001 if (fAccess & IEM_ACCESS_TYPE_WRITE)
9002 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9003 if (fAccess & IEM_ACCESS_TYPE_READ)
9004 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9005
9006 void *pvMem;
9007 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9008 if (rcStrict == VINF_SUCCESS)
9009 { /* likely */ }
9010 else
9011 {
9012 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9013 if (rcStrict == VINF_SUCCESS)
9014 return pvMem;
9015 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9016 }
9017
9018 /*
9019 * Fill in the mapping table entry.
9020 */
9021 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9022 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9023 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9024 pVCpu->iem.s.cActiveMappings++;
9025
9026 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9027 return pvMem;
9028}
9029
9030
9031/**
9032 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9033 *
9034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9035 * @param pvMem The mapping.
9036 * @param fAccess The kind of access.
9037 */
9038IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9039{
9040 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9041 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9042
9043 /* If it's bounce buffered, we may need to write back the buffer. */
9044 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9045 {
9046 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9047 {
9048 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9049 if (rcStrict == VINF_SUCCESS)
9050 return;
9051 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9052 }
9053 }
9054 /* Otherwise unlock it. */
9055 else
9056 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9057
9058 /* Free the entry. */
9059 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9060 Assert(pVCpu->iem.s.cActiveMappings != 0);
9061 pVCpu->iem.s.cActiveMappings--;
9062}
9063
9064#endif
9065
9066#ifndef IN_RING3
9067/**
9068 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9069 * buffer part shows trouble it will be postponed to ring-3 (sets FF and stuff).
9070 *
9071 * Allows the instruction to be completed and retired, while the IEM user will
9072 * return to ring-3 immediately afterwards and do the postponed writes there.
9073 *
9074 * @returns VBox status code (no strict statuses). Caller must check
9075 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9076 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9077 * @param pvMem The mapping.
9078 * @param fAccess The kind of access.
9079 */
9080IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9081{
9082 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9083 AssertReturn(iMemMap >= 0, iMemMap);
9084
9085 /* If it's bounce buffered, we may need to write back the buffer. */
9086 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9087 {
9088 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9089 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9090 }
9091 /* Otherwise unlock it. */
9092 else
9093 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9094
9095 /* Free the entry. */
9096 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9097 Assert(pVCpu->iem.s.cActiveMappings != 0);
9098 pVCpu->iem.s.cActiveMappings--;
9099 return VINF_SUCCESS;
9100}
9101#endif
9102
9103
9104/**
9105 * Rolls back mappings, releasing page locks and such.
9106 *
9107 * The caller shall only call this after checking cActiveMappings.
9108 *
9110 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9111 */
9112IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9113{
9114 Assert(pVCpu->iem.s.cActiveMappings > 0);
9115
9116 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9117 while (iMemMap-- > 0)
9118 {
9119 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9120 if (fAccess != IEM_ACCESS_INVALID)
9121 {
9122 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9123 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9124 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9125 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9126 Assert(pVCpu->iem.s.cActiveMappings > 0);
9127 pVCpu->iem.s.cActiveMappings--;
9128 }
9129 }
9130}
9131
9132
9133/**
9134 * Fetches a data byte.
9135 *
9136 * @returns Strict VBox status code.
9137 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9138 * @param pu8Dst Where to return the byte.
9139 * @param iSegReg The index of the segment register to use for
9140 * this access. The base and limits are checked.
9141 * @param GCPtrMem The address of the guest memory.
9142 */
9143IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9144{
9145 /* The lazy approach for now... */
9146 uint8_t const *pu8Src;
9147 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9148 if (rc == VINF_SUCCESS)
9149 {
9150 *pu8Dst = *pu8Src;
9151 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9152 }
9153 return rc;
9154}
9155
9156
9157#ifdef IEM_WITH_SETJMP
9158/**
9159 * Fetches a data byte, longjmp on error.
9160 *
9161 * @returns The byte.
9162 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9163 * @param iSegReg The index of the segment register to use for
9164 * this access. The base and limits are checked.
9165 * @param GCPtrMem The address of the guest memory.
9166 */
9167DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9168{
9169 /* The lazy approach for now... */
9170 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9171 uint8_t const bRet = *pu8Src;
9172 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9173 return bRet;
9174}
9175#endif /* IEM_WITH_SETJMP */
9176
9177
9178/**
9179 * Fetches a data word.
9180 *
9181 * @returns Strict VBox status code.
9182 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9183 * @param pu16Dst Where to return the word.
9184 * @param iSegReg The index of the segment register to use for
9185 * this access. The base and limits are checked.
9186 * @param GCPtrMem The address of the guest memory.
9187 */
9188IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9189{
9190 /* The lazy approach for now... */
9191 uint16_t const *pu16Src;
9192 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9193 if (rc == VINF_SUCCESS)
9194 {
9195 *pu16Dst = *pu16Src;
9196 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9197 }
9198 return rc;
9199}
9200
9201
9202#ifdef IEM_WITH_SETJMP
9203/**
9204 * Fetches a data word, longjmp on error.
9205 *
9206 * @returns The word.
9207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9208 * @param iSegReg The index of the segment register to use for
9209 * this access. The base and limits are checked.
9210 * @param GCPtrMem The address of the guest memory.
9211 */
9212DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9213{
9214 /* The lazy approach for now... */
9215 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9216 uint16_t const u16Ret = *pu16Src;
9217 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9218 return u16Ret;
9219}
9220#endif
9221
9222
9223/**
9224 * Fetches a data dword.
9225 *
9226 * @returns Strict VBox status code.
9227 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9228 * @param pu32Dst Where to return the dword.
9229 * @param iSegReg The index of the segment register to use for
9230 * this access. The base and limits are checked.
9231 * @param GCPtrMem The address of the guest memory.
9232 */
9233IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9234{
9235 /* The lazy approach for now... */
9236 uint32_t const *pu32Src;
9237 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9238 if (rc == VINF_SUCCESS)
9239 {
9240 *pu32Dst = *pu32Src;
9241 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9242 }
9243 return rc;
9244}
9245
9246
9247#ifdef IEM_WITH_SETJMP
9248
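/**
 * Applies segmentation to a read address, longjmp on error.
 *
 * Jmp counterpart of iemMemApplySegment for data reads: checks the hidden
 * segment attributes and limit and raises any exception via longjmp instead of
 * returning a strict status code.
 *
 * @returns The address to use for the actual memory access (base applied).
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iSegReg The index of the segment register to use for
 * this access. The base and limits are checked.
 * @param cbMem The number of bytes to access.
 * @param GCPtrMem The address of the guest memory.
 */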
9249IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9250{
9251 Assert(cbMem >= 1);
9252 Assert(iSegReg < X86_SREG_COUNT);
9253
9254 /*
9255 * 64-bit mode is simpler.
9256 */
9257 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9258 {
9259 if (iSegReg >= X86_SREG_FS)
9260 {
9261 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9262 GCPtrMem += pSel->u64Base;
9263 }
9264
9265 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9266 return GCPtrMem;
9267 }
9268 /*
9269 * 16-bit and 32-bit segmentation.
9270 */
9271 else
9272 {
9273 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9274 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9275 == X86DESCATTR_P /* data, expand up */
9276 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9277 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9278 {
9279 /* expand up */
9280 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
9281 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
9282 && GCPtrLast32 >= (uint32_t)GCPtrMem))
9283 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9284 }
9285 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9286 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9287 {
9288 /* expand down */
9289 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9290 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9291 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9292 && GCPtrLast32 > (uint32_t)GCPtrMem))
9293 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9294 }
9295 else
9296 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9297 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9298 }
9299 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9300}
9301
9302
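/**
 * Applies segmentation to a write address, longjmp on error.
 *
 * Jmp counterpart of iemMemApplySegment for data writes: checks that the
 * segment is a present, writable data segment and that the access is within
 * the limit, raising any exception via longjmp.
 *
 * @returns The address to use for the actual memory access (base applied).
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iSegReg The index of the segment register to use for
 * this access. The base and limits are checked.
 * @param cbMem The number of bytes to access.
 * @param GCPtrMem The address of the guest memory.
 */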
9303IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9304{
9305 Assert(cbMem >= 1);
9306 Assert(iSegReg < X86_SREG_COUNT);
9307
9308 /*
9309 * 64-bit mode is simpler.
9310 */
9311 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9312 {
9313 if (iSegReg >= X86_SREG_FS)
9314 {
9315 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9316 GCPtrMem += pSel->u64Base;
9317 }
9318
9319 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9320 return GCPtrMem;
9321 }
9322 /*
9323 * 16-bit and 32-bit segmentation.
9324 */
9325 else
9326 {
9327 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9328 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9329 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9330 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9331 {
9332 /* expand up */
9333 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
9334 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
9335 && GCPtrLast32 >= (uint32_t)GCPtrMem))
9336 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9337 }
9338 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9339 {
9340 /* expand down */
9341 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9342 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9343 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9344 && GCPtrLast32 > (uint32_t)GCPtrMem))
9345 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9346 }
9347 else
9348 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9349 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9350 }
9351 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9352}
9353
9354
9355/**
9356 * Fetches a data dword, longjmp on error, fallback/safe version.
9357 *
9358 * @returns The dword.
9359 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9360 * @param iSegReg The index of the segment register to use for
9361 * this access. The base and limits are checked.
9362 * @param GCPtrMem The address of the guest memory.
9363 */
9364IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9365{
9366 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9367 uint32_t const u32Ret = *pu32Src;
9368 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9369 return u32Ret;
9370}
9371
9372
9373/**
9374 * Fetches a data dword, longjmp on error.
9375 *
9376 * @returns The dword.
9377 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9378 * @param iSegReg The index of the segment register to use for
9379 * this access. The base and limits are checked.
9380 * @param GCPtrMem The address of the guest memory.
9381 */
9382DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9383{
9384# ifdef IEM_WITH_DATA_TLB
9385 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9386 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9387 {
9388 /// @todo more later.
9389 }
9390
9391 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9392# else
9393 /* The lazy approach. */
9394 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9395 uint32_t const u32Ret = *pu32Src;
9396 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9397 return u32Ret;
9398# endif
9399}
9400#endif
9401
9402
9403#ifdef SOME_UNUSED_FUNCTION
9404/**
9405 * Fetches a data dword and sign extends it to a qword.
9406 *
9407 * @returns Strict VBox status code.
9408 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9409 * @param pu64Dst Where to return the sign extended value.
9410 * @param iSegReg The index of the segment register to use for
9411 * this access. The base and limits are checked.
9412 * @param GCPtrMem The address of the guest memory.
9413 */
9414IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9415{
9416 /* The lazy approach for now... */
9417 int32_t const *pi32Src;
9418 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9419 if (rc == VINF_SUCCESS)
9420 {
9421 *pu64Dst = *pi32Src;
9422 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9423 }
9424#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9425 else
9426 *pu64Dst = 0;
9427#endif
9428 return rc;
9429}
9430#endif
9431
9432
9433/**
9434 * Fetches a data qword.
9435 *
9436 * @returns Strict VBox status code.
9437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9438 * @param pu64Dst Where to return the qword.
9439 * @param iSegReg The index of the segment register to use for
9440 * this access. The base and limits are checked.
9441 * @param GCPtrMem The address of the guest memory.
9442 */
9443IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9444{
9445 /* The lazy approach for now... */
9446 uint64_t const *pu64Src;
9447 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9448 if (rc == VINF_SUCCESS)
9449 {
9450 *pu64Dst = *pu64Src;
9451 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9452 }
9453 return rc;
9454}
9455
9456
9457#ifdef IEM_WITH_SETJMP
9458/**
9459 * Fetches a data qword, longjmp on error.
9460 *
9461 * @returns The qword.
9462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9463 * @param iSegReg The index of the segment register to use for
9464 * this access. The base and limits are checked.
9465 * @param GCPtrMem The address of the guest memory.
9466 */
9467DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9468{
9469 /* The lazy approach for now... */
9470 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9471 uint64_t const u64Ret = *pu64Src;
9472 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9473 return u64Ret;
9474}
9475#endif
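
/*
 * Illustrative sketch (kept in '#if 0', not part of IEM): the two calling
 * conventions offered by the data fetch helpers above.  The strict status
 * variant reports errors through a VBOXSTRICTRC that the caller must
 * propagate, while the IEM_WITH_SETJMP variant returns the value directly
 * and longjmps on any failure.  pVCpu and GCPtrMem are assumed to be
 * supplied by the instruction decoder.
 */
#if 0
{
    /* Strict-status style: the value comes back via an output parameter. */
    uint64_t     u64Value;
    VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &u64Value, X86_SREG_DS, GCPtrMem);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

# ifdef IEM_WITH_SETJMP
    /* Setjmp style: the value is the return value; errors unwind via longjmp. */
    uint64_t const u64Value2 = iemMemFetchDataU64Jmp(pVCpu, X86_SREG_DS, GCPtrMem);
    RT_NOREF(u64Value2);
# endif
}
#endif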
9476
9477
9478/**
9479 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9480 *
9481 * @returns Strict VBox status code.
9482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9483 * @param pu64Dst Where to return the qword.
9484 * @param iSegReg The index of the segment register to use for
9485 * this access. The base and limits are checked.
9486 * @param GCPtrMem The address of the guest memory.
9487 */
9488IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9489{
9490 /* The lazy approach for now... */
9491 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9492 if (RT_UNLIKELY(GCPtrMem & 15))
9493 return iemRaiseGeneralProtectionFault0(pVCpu);
9494
9495 uint64_t const *pu64Src;
9496 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9497 if (rc == VINF_SUCCESS)
9498 {
9499 *pu64Dst = *pu64Src;
9500 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9501 }
9502 return rc;
9503}
9504
9505
9506#ifdef IEM_WITH_SETJMP
9507/**
9508 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9509 *
9510 * @returns The qword.
9511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9512 * @param iSegReg The index of the segment register to use for
9513 * this access. The base and limits are checked.
9514 * @param GCPtrMem The address of the guest memory.
9515 */
9516DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9517{
9518 /* The lazy approach for now... */
9519 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9520 if (RT_LIKELY(!(GCPtrMem & 15)))
9521 {
9522 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9523 uint64_t const u64Ret = *pu64Src;
9524 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9525 return u64Ret;
9526 }
9527
9528 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9529 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9530}
9531#endif
9532
9533
9534/**
9535 * Fetches a data tword.
9536 *
9537 * @returns Strict VBox status code.
9538 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9539 * @param pr80Dst Where to return the tword.
9540 * @param iSegReg The index of the segment register to use for
9541 * this access. The base and limits are checked.
9542 * @param GCPtrMem The address of the guest memory.
9543 */
9544IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9545{
9546 /* The lazy approach for now... */
9547 PCRTFLOAT80U pr80Src;
9548 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9549 if (rc == VINF_SUCCESS)
9550 {
9551 *pr80Dst = *pr80Src;
9552 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9553 }
9554 return rc;
9555}
9556
9557
9558#ifdef IEM_WITH_SETJMP
9559/**
9560 * Fetches a data tword, longjmp on error.
9561 *
9562 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9563 * @param pr80Dst Where to return the tword.
9564 * @param iSegReg The index of the segment register to use for
9565 * this access. The base and limits are checked.
9566 * @param GCPtrMem The address of the guest memory.
9567 */
9568DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9569{
9570 /* The lazy approach for now... */
9571 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9572 *pr80Dst = *pr80Src;
9573 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9574}
9575#endif
9576
9577
9578/**
9579 * Fetches a data dqword (double qword), generally SSE related.
9580 *
9581 * @returns Strict VBox status code.
9582 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9583 * @param pu128Dst Where to return the dqword.
9584 * @param iSegReg The index of the segment register to use for
9585 * this access. The base and limits are checked.
9586 * @param GCPtrMem The address of the guest memory.
9587 */
9588IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9589{
9590 /* The lazy approach for now... */
9591 PCRTUINT128U pu128Src;
9592 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9593 if (rc == VINF_SUCCESS)
9594 {
9595 pu128Dst->au64[0] = pu128Src->au64[0];
9596 pu128Dst->au64[1] = pu128Src->au64[1];
9597 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9598 }
9599 return rc;
9600}
9601
9602
9603#ifdef IEM_WITH_SETJMP
9604/**
9605 * Fetches a data dqword (double qword), generally SSE related.
9606 *
9607 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9608 * @param pu128Dst Where to return the dqword.
9609 * @param iSegReg The index of the segment register to use for
9610 * this access. The base and limits are checked.
9611 * @param GCPtrMem The address of the guest memory.
9612 */
9613IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9614{
9615 /* The lazy approach for now... */
9616 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9617 pu128Dst->au64[0] = pu128Src->au64[0];
9618 pu128Dst->au64[1] = pu128Src->au64[1];
9619 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9620}
9621#endif
9622
9623
9624/**
9625 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9626 * related.
9627 *
9628 * Raises \#GP(0) if not aligned.
9629 *
9630 * @returns Strict VBox status code.
9631 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9632 * @param pu128Dst Where to return the dqword.
9633 * @param iSegReg The index of the segment register to use for
9634 * this access. The base and limits are checked.
9635 * @param GCPtrMem The address of the guest memory.
9636 */
9637IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9638{
9639 /* The lazy approach for now... */
9640 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9641 if ( (GCPtrMem & 15)
9642 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9643 return iemRaiseGeneralProtectionFault0(pVCpu);
9644
9645 PCRTUINT128U pu128Src;
9646 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9647 if (rc == VINF_SUCCESS)
9648 {
9649 pu128Dst->au64[0] = pu128Src->au64[0];
9650 pu128Dst->au64[1] = pu128Src->au64[1];
9651 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9652 }
9653 return rc;
9654}
9655
9656
9657#ifdef IEM_WITH_SETJMP
9658/**
9659 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9660 * related, longjmp on error.
9661 *
9662 * Raises \#GP(0) if not aligned.
9663 *
9664 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9665 * @param pu128Dst Where to return the dqword.
9666 * @param iSegReg The index of the segment register to use for
9667 * this access. The base and limits are checked.
9668 * @param GCPtrMem The address of the guest memory.
9669 */
9670DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9671{
9672 /* The lazy approach for now... */
9673 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9674 if ( (GCPtrMem & 15) == 0
9675 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9676 {
9677 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9678 pu128Dst->au64[0] = pu128Src->au64[0];
9679 pu128Dst->au64[1] = pu128Src->au64[1];
9680 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9681 return;
9682 }
9683
9684 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9685 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9686}
9687#endif
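
/*
 * Illustrative helper (hypothetical, kept in '#if 0'): the alignment rule
 * the SSE-aligned 128-bit fetch/store paths above apply.  A misaligned
 * 16-byte access normally raises \#GP(0), unless the (AMD) MXCSR.MM bit is
 * set, which permits misaligned SSE accesses.
 */
#if 0
DECLINLINE(bool) iemMemIsSseAlignmentOk(PVMCPU pVCpu, RTGCPTR GCPtrMem)
{
    return (GCPtrMem & 15) == 0
        || RT_BOOL(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM);
}
#endif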
9688
9689
9690/**
9691 * Fetches a data oword (octo word), generally AVX related.
9692 *
9693 * @returns Strict VBox status code.
9694 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9695 * @param pu256Dst Where to return the oword.
9696 * @param iSegReg The index of the segment register to use for
9697 * this access. The base and limits are checked.
9698 * @param GCPtrMem The address of the guest memory.
9699 */
9700IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9701{
9702 /* The lazy approach for now... */
9703 PCRTUINT256U pu256Src;
9704 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9705 if (rc == VINF_SUCCESS)
9706 {
9707 pu256Dst->au64[0] = pu256Src->au64[0];
9708 pu256Dst->au64[1] = pu256Src->au64[1];
9709 pu256Dst->au64[2] = pu256Src->au64[2];
9710 pu256Dst->au64[3] = pu256Src->au64[3];
9711 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9712 }
9713 return rc;
9714}
9715
9716
9717#ifdef IEM_WITH_SETJMP
9718/**
9719 * Fetches a data oword (octo word), generally AVX related.
9720 *
9721 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9722 * @param pu256Dst Where to return the oword.
9723 * @param iSegReg The index of the segment register to use for
9724 * this access. The base and limits are checked.
9725 * @param GCPtrMem The address of the guest memory.
9726 */
9727IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9728{
9729 /* The lazy approach for now... */
9730 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9731 pu256Dst->au64[0] = pu256Src->au64[0];
9732 pu256Dst->au64[1] = pu256Src->au64[1];
9733 pu256Dst->au64[2] = pu256Src->au64[2];
9734 pu256Dst->au64[3] = pu256Src->au64[3];
9735 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9736}
9737#endif
9738
9739
9740/**
9741 * Fetches a data oword (octo word) at an aligned address, generally AVX
9742 * related.
9743 *
9744 * Raises \#GP(0) if not aligned.
9745 *
9746 * @returns Strict VBox status code.
9747 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9748 * @param pu256Dst Where to return the oword.
9749 * @param iSegReg The index of the segment register to use for
9750 * this access. The base and limits are checked.
9751 * @param GCPtrMem The address of the guest memory.
9752 */
9753IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9754{
9755 /* The lazy approach for now... */
9756 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9757 if (GCPtrMem & 31)
9758 return iemRaiseGeneralProtectionFault0(pVCpu);
9759
9760 PCRTUINT256U pu256Src;
9761 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9762 if (rc == VINF_SUCCESS)
9763 {
9764 pu256Dst->au64[0] = pu256Src->au64[0];
9765 pu256Dst->au64[1] = pu256Src->au64[1];
9766 pu256Dst->au64[2] = pu256Src->au64[2];
9767 pu256Dst->au64[3] = pu256Src->au64[3];
9768 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9769 }
9770 return rc;
9771}
9772
9773
9774#ifdef IEM_WITH_SETJMP
9775/**
9776 * Fetches a data oword (octo word) at an aligned address, generally AVX
9777 * related, longjmp on error.
9778 *
9779 * Raises \#GP(0) if not aligned.
9780 *
9781 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9782 * @param pu256Dst Where to return the oword.
9783 * @param iSegReg The index of the segment register to use for
9784 * this access. The base and limits are checked.
9785 * @param GCPtrMem The address of the guest memory.
9786 */
9787DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9788{
9789 /* The lazy approach for now... */
9790 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9791 if ((GCPtrMem & 31) == 0)
9792 {
9793 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9794 pu256Dst->au64[0] = pu256Src->au64[0];
9795 pu256Dst->au64[1] = pu256Src->au64[1];
9796 pu256Dst->au64[2] = pu256Src->au64[2];
9797 pu256Dst->au64[3] = pu256Src->au64[3];
9798 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9799 return;
9800 }
9801
9802 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9803 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9804}
9805#endif
9806
9807
9808
9809/**
9810 * Fetches a descriptor register (lgdt, lidt).
9811 *
9812 * @returns Strict VBox status code.
9813 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9814 * @param pcbLimit Where to return the limit.
9815 * @param pGCPtrBase Where to return the base.
9816 * @param iSegReg The index of the segment register to use for
9817 * this access. The base and limits are checked.
9818 * @param GCPtrMem The address of the guest memory.
9819 * @param enmOpSize The effective operand size.
9820 */
9821IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9822 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9823{
9824 /*
9825 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9826 * little special:
9827 * - The two reads are done separately.
9828 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9829 * - We suspect the 386 to actually commit the limit before the base in
9830 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9831 * don't try to emulate this eccentric behavior, because it's not well
9832 * enough understood and rather hard to trigger.
9833 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9834 */
9835 VBOXSTRICTRC rcStrict;
9836 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9837 {
9838 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9839 if (rcStrict == VINF_SUCCESS)
9840 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9841 }
9842 else
9843 {
9844 uint32_t uTmp = 0; /* (Initialized to keep Visual C++ from warning about potentially uninitialized use.) */
9845 if (enmOpSize == IEMMODE_32BIT)
9846 {
9847 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9848 {
9849 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9850 if (rcStrict == VINF_SUCCESS)
9851 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9852 }
9853 else
9854 {
9855 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9856 if (rcStrict == VINF_SUCCESS)
9857 {
9858 *pcbLimit = (uint16_t)uTmp;
9859 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9860 }
9861 }
9862 if (rcStrict == VINF_SUCCESS)
9863 *pGCPtrBase = uTmp;
9864 }
9865 else
9866 {
9867 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9868 if (rcStrict == VINF_SUCCESS)
9869 {
9870 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9871 if (rcStrict == VINF_SUCCESS)
9872 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9873 }
9874 }
9875 }
9876 return rcStrict;
9877}
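
/*
 * Illustrative sketch (kept in '#if 0', not part of IEM): decoding a 32-bit
 * LGDT/LIDT memory operand with the helper above.  The operand is a 16-bit
 * limit at offset 0 followed by the base at offset 2 (32 bits here, of which
 * only 24 are used when the operand size is 16-bit).  pVCpu, iSegReg and
 * GCPtrMem are assumed to come from the instruction decoder.
 */
#if 0
{
    uint16_t     cbLimit;
    RTGCPTR      GCPtrBase;
    VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iSegReg, GCPtrMem, IEMMODE_32BIT);
    if (rcStrict == VINF_SUCCESS)
    {
        /* cbLimit now holds bytes 0..1 of the operand, GCPtrBase bytes 2..5. */
    }
    return rcStrict;
}
#endif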
9878
9879
9880
9881/**
9882 * Stores a data byte.
9883 *
9884 * @returns Strict VBox status code.
9885 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9886 * @param iSegReg The index of the segment register to use for
9887 * this access. The base and limits are checked.
9888 * @param GCPtrMem The address of the guest memory.
9889 * @param u8Value The value to store.
9890 */
9891IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9892{
9893 /* The lazy approach for now... */
9894 uint8_t *pu8Dst;
9895 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9896 if (rc == VINF_SUCCESS)
9897 {
9898 *pu8Dst = u8Value;
9899 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9900 }
9901 return rc;
9902}
9903
9904
9905#ifdef IEM_WITH_SETJMP
9906/**
9907 * Stores a data byte, longjmp on error.
9908 *
9909 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9910 * @param iSegReg The index of the segment register to use for
9911 * this access. The base and limits are checked.
9912 * @param GCPtrMem The address of the guest memory.
9913 * @param u8Value The value to store.
9914 */
9915IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9916{
9917 /* The lazy approach for now... */
9918 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9919 *pu8Dst = u8Value;
9920 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9921}
9922#endif
9923
9924
9925/**
9926 * Stores a data word.
9927 *
9928 * @returns Strict VBox status code.
9929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9930 * @param iSegReg The index of the segment register to use for
9931 * this access. The base and limits are checked.
9932 * @param GCPtrMem The address of the guest memory.
9933 * @param u16Value The value to store.
9934 */
9935IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9936{
9937 /* The lazy approach for now... */
9938 uint16_t *pu16Dst;
9939 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9940 if (rc == VINF_SUCCESS)
9941 {
9942 *pu16Dst = u16Value;
9943 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9944 }
9945 return rc;
9946}
9947
9948
9949#ifdef IEM_WITH_SETJMP
9950/**
9951 * Stores a data word, longjmp on error.
9952 *
9953 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9954 * @param iSegReg The index of the segment register to use for
9955 * this access. The base and limits are checked.
9956 * @param GCPtrMem The address of the guest memory.
9957 * @param u16Value The value to store.
9958 */
9959IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9960{
9961 /* The lazy approach for now... */
9962 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9963 *pu16Dst = u16Value;
9964 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9965}
9966#endif
9967
9968
9969/**
9970 * Stores a data dword.
9971 *
9972 * @returns Strict VBox status code.
9973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9974 * @param iSegReg The index of the segment register to use for
9975 * this access. The base and limits are checked.
9976 * @param GCPtrMem The address of the guest memory.
9977 * @param u32Value The value to store.
9978 */
9979IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9980{
9981 /* The lazy approach for now... */
9982 uint32_t *pu32Dst;
9983 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9984 if (rc == VINF_SUCCESS)
9985 {
9986 *pu32Dst = u32Value;
9987 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9988 }
9989 return rc;
9990}
9991
9992
9993#ifdef IEM_WITH_SETJMP
9994/**
9995 * Stores a data dword, longjmp on error.
9996 *
9998 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9999 * @param iSegReg The index of the segment register to use for
10000 * this access. The base and limits are checked.
10001 * @param GCPtrMem The address of the guest memory.
10002 * @param u32Value The value to store.
10003 */
10004IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10005{
10006 /* The lazy approach for now... */
10007 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10008 *pu32Dst = u32Value;
10009 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10010}
10011#endif
10012
10013
10014/**
10015 * Stores a data qword.
10016 *
10017 * @returns Strict VBox status code.
10018 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10019 * @param iSegReg The index of the segment register to use for
10020 * this access. The base and limits are checked.
10021 * @param GCPtrMem The address of the guest memory.
10022 * @param u64Value The value to store.
10023 */
10024IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10025{
10026 /* The lazy approach for now... */
10027 uint64_t *pu64Dst;
10028 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10029 if (rc == VINF_SUCCESS)
10030 {
10031 *pu64Dst = u64Value;
10032 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10033 }
10034 return rc;
10035}
10036
10037
10038#ifdef IEM_WITH_SETJMP
10039/**
10040 * Stores a data qword, longjmp on error.
10041 *
10042 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10043 * @param iSegReg The index of the segment register to use for
10044 * this access. The base and limits are checked.
10045 * @param GCPtrMem The address of the guest memory.
10046 * @param u64Value The value to store.
10047 */
10048IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10049{
10050 /* The lazy approach for now... */
10051 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10052 *pu64Dst = u64Value;
10053 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10054}
10055#endif
10056
10057
10058/**
10059 * Stores a data dqword.
10060 *
10061 * @returns Strict VBox status code.
10062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10063 * @param iSegReg The index of the segment register to use for
10064 * this access. The base and limits are checked.
10065 * @param GCPtrMem The address of the guest memory.
10066 * @param u128Value The value to store.
10067 */
10068IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10069{
10070 /* The lazy approach for now... */
10071 PRTUINT128U pu128Dst;
10072 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10073 if (rc == VINF_SUCCESS)
10074 {
10075 pu128Dst->au64[0] = u128Value.au64[0];
10076 pu128Dst->au64[1] = u128Value.au64[1];
10077 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10078 }
10079 return rc;
10080}
10081
10082
10083#ifdef IEM_WITH_SETJMP
10084/**
10085 * Stores a data dqword, longjmp on error.
10086 *
10087 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10088 * @param iSegReg The index of the segment register to use for
10089 * this access. The base and limits are checked.
10090 * @param GCPtrMem The address of the guest memory.
10091 * @param u128Value The value to store.
10092 */
10093IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10094{
10095 /* The lazy approach for now... */
10096 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10097 pu128Dst->au64[0] = u128Value.au64[0];
10098 pu128Dst->au64[1] = u128Value.au64[1];
10099 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10100}
10101#endif
10102
10103
10104/**
10105 * Stores a data dqword, SSE aligned.
10106 *
10107 * @returns Strict VBox status code.
10108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10109 * @param iSegReg The index of the segment register to use for
10110 * this access. The base and limits are checked.
10111 * @param GCPtrMem The address of the guest memory.
10112 * @param u128Value The value to store.
10113 */
10114IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10115{
10116 /* The lazy approach for now... */
10117 if ( (GCPtrMem & 15)
10118 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10119 return iemRaiseGeneralProtectionFault0(pVCpu);
10120
10121 PRTUINT128U pu128Dst;
10122 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10123 if (rc == VINF_SUCCESS)
10124 {
10125 pu128Dst->au64[0] = u128Value.au64[0];
10126 pu128Dst->au64[1] = u128Value.au64[1];
10127 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10128 }
10129 return rc;
10130}
10131
10132
10133#ifdef IEM_WITH_SETJMP
10134/**
10135 * Stores a data dqword, SSE aligned, longjmp on error.
10136 *
10138 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10139 * @param iSegReg The index of the segment register to use for
10140 * this access. The base and limits are checked.
10141 * @param GCPtrMem The address of the guest memory.
10142 * @param u128Value The value to store.
10143 */
10144DECL_NO_INLINE(IEM_STATIC, void)
10145iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10146{
10147 /* The lazy approach for now... */
10148 if ( (GCPtrMem & 15) == 0
10149 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10150 {
10151 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10152 pu128Dst->au64[0] = u128Value.au64[0];
10153 pu128Dst->au64[1] = u128Value.au64[1];
10154 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10155 return;
10156 }
10157
10158 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10159 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10160}
10161#endif
10162
10163
10164/**
10165 * Stores a data oword (octo word).
10166 *
10167 * @returns Strict VBox status code.
10168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10169 * @param iSegReg The index of the segment register to use for
10170 * this access. The base and limits are checked.
10171 * @param GCPtrMem The address of the guest memory.
10172 * @param pu256Value Pointer to the value to store.
10173 */
10174IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10175{
10176 /* The lazy approach for now... */
10177 PRTUINT256U pu256Dst;
10178 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10179 if (rc == VINF_SUCCESS)
10180 {
10181 pu256Dst->au64[0] = pu256Value->au64[0];
10182 pu256Dst->au64[1] = pu256Value->au64[1];
10183 pu256Dst->au64[2] = pu256Value->au64[2];
10184 pu256Dst->au64[3] = pu256Value->au64[3];
10185 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10186 }
10187 return rc;
10188}
10189
10190
10191#ifdef IEM_WITH_SETJMP
10192/**
10193 * Stores a data oword (octo word), longjmp on error.
10194 *
10195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10196 * @param iSegReg The index of the segment register to use for
10197 * this access. The base and limits are checked.
10198 * @param GCPtrMem The address of the guest memory.
10199 * @param pu256Value Pointer to the value to store.
10200 */
10201IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10202{
10203 /* The lazy approach for now... */
10204 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10205 pu256Dst->au64[0] = pu256Value->au64[0];
10206 pu256Dst->au64[1] = pu256Value->au64[1];
10207 pu256Dst->au64[2] = pu256Value->au64[2];
10208 pu256Dst->au64[3] = pu256Value->au64[3];
10209 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10210}
10211#endif
10212
10213
10214/**
10215 * Stores a data oword (octo word), AVX aligned.
10216 *
10217 * @returns Strict VBox status code.
10218 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10219 * @param iSegReg The index of the segment register to use for
10220 * this access. The base and limits are checked.
10221 * @param GCPtrMem The address of the guest memory.
10222 * @param pu256Value Pointer to the value to store.
10223 */
10224IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10225{
10226 /* The lazy approach for now... */
10227 if (GCPtrMem & 31)
10228 return iemRaiseGeneralProtectionFault0(pVCpu);
10229
10230 PRTUINT256U pu256Dst;
10231 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10232 if (rc == VINF_SUCCESS)
10233 {
10234 pu256Dst->au64[0] = pu256Value->au64[0];
10235 pu256Dst->au64[1] = pu256Value->au64[1];
10236 pu256Dst->au64[2] = pu256Value->au64[2];
10237 pu256Dst->au64[3] = pu256Value->au64[3];
10238 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10239 }
10240 return rc;
10241}
10242
10243
10244#ifdef IEM_WITH_SETJMP
10245/**
10246 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10247 *
10249 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10250 * @param iSegReg The index of the segment register to use for
10251 * this access. The base and limits are checked.
10252 * @param GCPtrMem The address of the guest memory.
10253 * @param pu256Value Pointer to the value to store.
10254 */
10255DECL_NO_INLINE(IEM_STATIC, void)
10256iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10257{
10258 /* The lazy approach for now... */
10259 if ((GCPtrMem & 31) == 0)
10260 {
10261 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10262 pu256Dst->au64[0] = pu256Value->au64[0];
10263 pu256Dst->au64[1] = pu256Value->au64[1];
10264 pu256Dst->au64[2] = pu256Value->au64[2];
10265 pu256Dst->au64[3] = pu256Value->au64[3];
10266 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10267 return;
10268 }
10269
10270 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10271 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10272}
10273#endif
10274
10275
10276/**
10277 * Stores a descriptor register (sgdt, sidt).
10278 *
10279 * @returns Strict VBox status code.
10280 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10281 * @param cbLimit The limit.
10282 * @param GCPtrBase The base address.
10283 * @param iSegReg The index of the segment register to use for
10284 * this access. The base and limits are checked.
10285 * @param GCPtrMem The address of the guest memory.
10286 */
10287IEM_STATIC VBOXSTRICTRC
10288iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10289{
10290 VBOXSTRICTRC rcStrict;
10291 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_READS))
10292 {
10293 Log(("sidt/sgdt: Guest intercept -> #VMEXIT\n"));
10294 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
10295 }
10296
10297 /*
10298 * The SIDT and SGDT instructions actually store the data using two
10299 * independent writes. The instructions do not respond to operand-size prefixes.
10300 */
10301 rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10302 if (rcStrict == VINF_SUCCESS)
10303 {
10304 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10305 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10306 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10307 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10308 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10309 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10310 else
10311 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10312 }
10313 return rcStrict;
10314}
10315
10316
10317/**
10318 * Pushes a word onto the stack.
10319 *
10320 * @returns Strict VBox status code.
10321 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10322 * @param u16Value The value to push.
10323 */
10324IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10325{
10326 /* Increment the stack pointer. */
10327 uint64_t uNewRsp;
10328 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10329 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
10330
10331 /* Write the word the lazy way. */
10332 uint16_t *pu16Dst;
10333 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10334 if (rc == VINF_SUCCESS)
10335 {
10336 *pu16Dst = u16Value;
10337 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10338 }
10339
10340 /* Commit the new RSP value unless an access handler made trouble. */
10341 if (rc == VINF_SUCCESS)
10342 pCtx->rsp = uNewRsp;
10343
10344 return rc;
10345}
10346
10347
10348/**
10349 * Pushes a dword onto the stack.
10350 *
10351 * @returns Strict VBox status code.
10352 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10353 * @param u32Value The value to push.
10354 */
10355IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10356{
10357 /* Increment the stack pointer. */
10358 uint64_t uNewRsp;
10359 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10360 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10361
10362 /* Write the dword the lazy way. */
10363 uint32_t *pu32Dst;
10364 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10365 if (rc == VINF_SUCCESS)
10366 {
10367 *pu32Dst = u32Value;
10368 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10369 }
10370
10371 /* Commit the new RSP value unless an access handler made trouble. */
10372 if (rc == VINF_SUCCESS)
10373 pCtx->rsp = uNewRsp;
10374
10375 return rc;
10376}
10377
10378
10379/**
10380 * Pushes a dword segment register value onto the stack.
10381 *
10382 * @returns Strict VBox status code.
10383 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10384 * @param u32Value The value to push.
10385 */
10386IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10387{
10388 /* Increment the stack pointer. */
10389 uint64_t uNewRsp;
10390 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10391 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10392
10393 VBOXSTRICTRC rc;
10394 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
10395 {
10396 /* The recompiler writes a full dword. */
10397 uint32_t *pu32Dst;
10398 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10399 if (rc == VINF_SUCCESS)
10400 {
10401 *pu32Dst = u32Value;
10402 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10403 }
10404 }
10405 else
10406 {
10407 /* The intel docs talk about zero extending the selector register
10408 value. My actual intel CPU here might be zero extending the value,
10409 but it still only writes the lower word... */
10410 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10411 * happens when crossing a page boundary: is the high word checked
10412 * for write accessibility or not? Probably it is. What about segment limits?
10413 * It appears this behavior is also shared with trap error codes.
10414 *
10415 * Docs indicate the behavior may have changed with the Pentium or Pentium Pro.
10416 * Check on ancient hardware to find out when it actually changed. */
10417 uint16_t *pu16Dst;
10418 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10419 if (rc == VINF_SUCCESS)
10420 {
10421 *pu16Dst = (uint16_t)u32Value;
10422 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10423 }
10424 }
10425
10426 /* Commit the new RSP value unless an access handler made trouble. */
10427 if (rc == VINF_SUCCESS)
10428 pCtx->rsp = uNewRsp;
10429
10430 return rc;
10431}
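
/*
 * Worked example (illustrative, hypothetical values) of the word-only write
 * done by iemMemStackPushU32SReg above when the recompiler comparison is not
 * active: pushing FS = 0x0030 with a 32-bit operand size and ESP = 0x1008,
 * where the dword at 0x1004 currently holds 0xDEADBEEF.
 *   - ESP drops by 4 to 0x1004 (a full dword slot is reserved),
 *   - the word at 0x1004 becomes 0x0030 (the selector value),
 *   - the word at 0x1006 stays 0xDEAD (the high half of the slot is untouched).
 */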
10432
10433
10434/**
10435 * Pushes a qword onto the stack.
10436 *
10437 * @returns Strict VBox status code.
10438 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10439 * @param u64Value The value to push.
10440 */
10441IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10442{
10443 /* Increment the stack pointer. */
10444 uint64_t uNewRsp;
10445 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10446 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
10447
10448 /* Write the qword the lazy way. */
10449 uint64_t *pu64Dst;
10450 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10451 if (rc == VINF_SUCCESS)
10452 {
10453 *pu64Dst = u64Value;
10454 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10455 }
10456
10457 /* Commit the new RSP value unless an access handler made trouble. */
10458 if (rc == VINF_SUCCESS)
10459 pCtx->rsp = uNewRsp;
10460
10461 return rc;
10462}
10463
10464
10465/**
10466 * Pops a word from the stack.
10467 *
10468 * @returns Strict VBox status code.
10469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10470 * @param pu16Value Where to store the popped value.
10471 */
10472IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10473{
10474 /* Increment the stack pointer. */
10475 uint64_t uNewRsp;
10476 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10477 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
10478
10479 /* Read the word the lazy way. */
10480 uint16_t const *pu16Src;
10481 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10482 if (rc == VINF_SUCCESS)
10483 {
10484 *pu16Value = *pu16Src;
10485 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10486
10487 /* Commit the new RSP value. */
10488 if (rc == VINF_SUCCESS)
10489 pCtx->rsp = uNewRsp;
10490 }
10491
10492 return rc;
10493}
10494
10495
10496/**
10497 * Pops a dword from the stack.
10498 *
10499 * @returns Strict VBox status code.
10500 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10501 * @param pu32Value Where to store the popped value.
10502 */
10503IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10504{
10505 /* Increment the stack pointer. */
10506 uint64_t uNewRsp;
10507 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10508 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
10509
10510 /* Read the dword the lazy way. */
10511 uint32_t const *pu32Src;
10512 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10513 if (rc == VINF_SUCCESS)
10514 {
10515 *pu32Value = *pu32Src;
10516 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10517
10518 /* Commit the new RSP value. */
10519 if (rc == VINF_SUCCESS)
10520 pCtx->rsp = uNewRsp;
10521 }
10522
10523 return rc;
10524}
10525
10526
10527/**
10528 * Pops a qword from the stack.
10529 *
10530 * @returns Strict VBox status code.
10531 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10532 * @param pu64Value Where to store the popped value.
10533 */
10534IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10535{
10536 /* Increment the stack pointer. */
10537 uint64_t uNewRsp;
10538 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10539 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
10540
10541 /* Read the qword the lazy way. */
10542 uint64_t const *pu64Src;
10543 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10544 if (rc == VINF_SUCCESS)
10545 {
10546 *pu64Value = *pu64Src;
10547 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10548
10549 /* Commit the new RSP value. */
10550 if (rc == VINF_SUCCESS)
10551 pCtx->rsp = uNewRsp;
10552 }
10553
10554 return rc;
10555}
10556
10557
10558/**
10559 * Pushes a word onto the stack, using a temporary stack pointer.
10560 *
10561 * @returns Strict VBox status code.
10562 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10563 * @param u16Value The value to push.
10564 * @param pTmpRsp Pointer to the temporary stack pointer.
10565 */
10566IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10567{
10568 /* Increment the stack pointer. */
10569 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10570 RTUINT64U NewRsp = *pTmpRsp;
10571 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
10572
10573 /* Write the word the lazy way. */
10574 uint16_t *pu16Dst;
10575 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10576 if (rc == VINF_SUCCESS)
10577 {
10578 *pu16Dst = u16Value;
10579 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10580 }
10581
10582 /* Commit the new RSP value unless an access handler made trouble. */
10583 if (rc == VINF_SUCCESS)
10584 *pTmpRsp = NewRsp;
10585
10586 return rc;
10587}
10588
10589
10590/**
10591 * Pushes a dword onto the stack, using a temporary stack pointer.
10592 *
10593 * @returns Strict VBox status code.
10594 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10595 * @param u32Value The value to push.
10596 * @param pTmpRsp Pointer to the temporary stack pointer.
10597 */
10598IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10599{
10600 /* Increment the stack pointer. */
10601 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10602 RTUINT64U NewRsp = *pTmpRsp;
10603 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
10604
10605 /* Write the dword the lazy way. */
10606 uint32_t *pu32Dst;
10607 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10608 if (rc == VINF_SUCCESS)
10609 {
10610 *pu32Dst = u32Value;
10611 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10612 }
10613
10614 /* Commit the new RSP value unless an access handler made trouble. */
10615 if (rc == VINF_SUCCESS)
10616 *pTmpRsp = NewRsp;
10617
10618 return rc;
10619}
10620
10621
10622/**
10623 * Pushes a qword onto the stack, using a temporary stack pointer.
10624 *
10625 * @returns Strict VBox status code.
10626 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10627 * @param u64Value The value to push.
10628 * @param pTmpRsp Pointer to the temporary stack pointer.
10629 */
10630IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10631{
10632 /* Increment the stack pointer. */
10633 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10634 RTUINT64U NewRsp = *pTmpRsp;
10635 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
10636
10637 /* Write the qword the lazy way. */
10638 uint64_t *pu64Dst;
10639 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10640 if (rc == VINF_SUCCESS)
10641 {
10642 *pu64Dst = u64Value;
10643 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10644 }
10645
10646 /* Commit the new RSP value unless an access handler made trouble. */
10647 if (rc == VINF_SUCCESS)
10648 *pTmpRsp = NewRsp;
10649
10650 return rc;
10651}
10652
10653
10654/**
10655 * Pops a word from the stack, using a temporary stack pointer.
10656 *
10657 * @returns Strict VBox status code.
10658 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10659 * @param pu16Value Where to store the popped value.
10660 * @param pTmpRsp Pointer to the temporary stack pointer.
10661 */
10662IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10663{
10664 /* Increment the stack pointer. */
10665 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10666 RTUINT64U NewRsp = *pTmpRsp;
10667 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
10668
10669 /* Read the word the lazy way. */
10670 uint16_t const *pu16Src;
10671 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10672 if (rc == VINF_SUCCESS)
10673 {
10674 *pu16Value = *pu16Src;
10675 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10676
10677 /* Commit the new RSP value. */
10678 if (rc == VINF_SUCCESS)
10679 *pTmpRsp = NewRsp;
10680 }
10681
10682 return rc;
10683}
10684
10685
10686/**
10687 * Pops a dword from the stack, using a temporary stack pointer.
10688 *
10689 * @returns Strict VBox status code.
10690 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10691 * @param pu32Value Where to store the popped value.
10692 * @param pTmpRsp Pointer to the temporary stack pointer.
10693 */
10694IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10695{
10696 /* Increment the stack pointer. */
10697 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10698 RTUINT64U NewRsp = *pTmpRsp;
10699 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
10700
10701 /* Read the dword the lazy way. */
10702 uint32_t const *pu32Src;
10703 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10704 if (rc == VINF_SUCCESS)
10705 {
10706 *pu32Value = *pu32Src;
10707 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10708
10709 /* Commit the new RSP value. */
10710 if (rc == VINF_SUCCESS)
10711 *pTmpRsp = NewRsp;
10712 }
10713
10714 return rc;
10715}
10716
10717
10718/**
10719 * Pops a qword from the stack, using a temporary stack pointer.
10720 *
10721 * @returns Strict VBox status code.
10722 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10723 * @param pu64Value Where to store the popped value.
10724 * @param pTmpRsp Pointer to the temporary stack pointer.
10725 */
10726IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10727{
10728 /* Increment the stack pointer. */
10729 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10730 RTUINT64U NewRsp = *pTmpRsp;
10731 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10732
10733 /* Read the qword the lazy way. */
10734 uint64_t const *pu64Src;
10735 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10736 if (rcStrict == VINF_SUCCESS)
10737 {
10738 *pu64Value = *pu64Src;
10739 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10740
10741 /* Commit the new RSP value. */
10742 if (rcStrict == VINF_SUCCESS)
10743 *pTmpRsp = NewRsp;
10744 }
10745
10746 return rcStrict;
10747}
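
/*
 * Illustrative sketch (kept in '#if 0', not part of IEM): the intended use of
 * the *Ex stack helpers above.  The caller keeps RSP in a temporary RTUINT64U
 * and only writes it back to the guest context once every push/pop in the
 * sequence has succeeded.  pVCpu is assumed to come from the caller.
 */
#if 0
{
    PCPUMCTX  pCtx = IEM_GET_CTX(pVCpu);
    RTUINT64U TmpRsp;
    TmpRsp.u = pCtx->rsp;

    uint16_t     u16Value;
    VBOXSTRICTRC rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Value, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPushU32Ex(pVCpu, u16Value, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        pCtx->rsp = TmpRsp.u;   /* Commit only after every step succeeded. */
    return rcStrict;
}
#endif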
10748
10749
10750/**
10751 * Begin a special stack push (used by interrupts, exceptions and such).
10752 *
10753 * This will raise \#SS or \#PF if appropriate.
10754 *
10755 * @returns Strict VBox status code.
10756 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10757 * @param cbMem The number of bytes to push onto the stack.
10758 * @param ppvMem Where to return the pointer to the stack memory.
10759 * As with the other memory functions this could be
10760 * direct access or bounce buffered access, so
10761 * don't commit any register state until the commit call
10762 * succeeds.
10763 * @param puNewRsp Where to return the new RSP value. This must be
10764 * passed unchanged to
10765 * iemMemStackPushCommitSpecial().
10766 */
10767IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10768{
10769 Assert(cbMem < UINT8_MAX);
10770 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10771 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10772 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10773}
10774
10775
10776/**
10777 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10778 *
10779 * This will update the rSP.
10780 *
10781 * @returns Strict VBox status code.
10782 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10783 * @param pvMem The pointer returned by
10784 * iemMemStackPushBeginSpecial().
10785 * @param uNewRsp The new RSP value returned by
10786 * iemMemStackPushBeginSpecial().
10787 */
10788IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10789{
10790 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10791 if (rcStrict == VINF_SUCCESS)
10792 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10793 return rcStrict;
10794}
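
/*
 * Illustrative sketch (kept in '#if 0', not part of IEM): the two-step
 * protocol for the special stack push helpers above, roughly as an exception
 * injector would use it.  The 6-byte frame and the uFlags/uSelCs/uIp values
 * are hypothetical.
 */
#if 0
{
    void    *pvFrame;
    uint64_t uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, &pvFrame, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        /* Fill in the (possibly bounce buffered) frame... */
        ((uint16_t *)pvFrame)[2] = uFlags;   /* hypothetical flags image  */
        ((uint16_t *)pvFrame)[1] = uSelCs;   /* hypothetical CS selector  */
        ((uint16_t *)pvFrame)[0] = uIp;      /* hypothetical return IP    */
        /* ...then commit the memory and the new RSP in one go. */
        rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvFrame, uNewRsp);
    }
    return rcStrict;
}
#endif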
10795
10796
10797/**
10798 * Begin a special stack pop (used by iret, retf and such).
10799 *
10800 * This will raise \#SS or \#PF if appropriate.
10801 *
10802 * @returns Strict VBox status code.
10803 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10804 * @param cbMem The number of bytes to pop from the stack.
10805 * @param ppvMem Where to return the pointer to the stack memory.
10806 * @param puNewRsp Where to return the new RSP value. This must be
10807 * assigned to CPUMCTX::rsp manually some time
10808 * after iemMemStackPopDoneSpecial() has been
10809 * called.
10810 */
10811IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10812{
10813 Assert(cbMem < UINT8_MAX);
10814 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10815 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10816 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10817}
10818
10819
10820/**
10821 * Continue a special stack pop (used by iret and retf).
10822 *
10823 * This will raise \#SS or \#PF if appropriate.
10824 *
10825 * @returns Strict VBox status code.
10826 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10827 * @param cbMem The number of bytes to pop from the stack.
10828 * @param ppvMem Where to return the pointer to the stack memory.
10829 * @param puNewRsp Where to return the new RSP value. This must be
10830 * assigned to CPUMCTX::rsp manually some time
10831 * after iemMemStackPopDoneSpecial() has been
10832 * called.
10833 */
10834IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10835{
10836 Assert(cbMem < UINT8_MAX);
10837 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10838 RTUINT64U NewRsp;
10839 NewRsp.u = *puNewRsp;
10840 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10841 *puNewRsp = NewRsp.u;
10842 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10843}
10844
10845
10846/**
10847 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10848 * iemMemStackPopContinueSpecial).
10849 *
10850 * The caller will manually commit the rSP.
10851 *
10852 * @returns Strict VBox status code.
10853 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10854 * @param pvMem The pointer returned by
10855 * iemMemStackPopBeginSpecial() or
10856 * iemMemStackPopContinueSpecial().
10857 */
10858IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10859{
10860 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10861}
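
/*
 * Illustrative sketch (kept in '#if 0', not part of IEM): the Begin/Done
 * protocol for the special stack pop helpers above, e.g. popping a 64-bit
 * return address the way a retf/iret implementation might.  Note that RSP is
 * committed by hand afterwards, as required by the comments above.
 */
#if 0
{
    void const *pvFrame;
    uint64_t    uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, &pvFrame, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        uint64_t const uNewRip = *(uint64_t const *)pvFrame;
        rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvFrame);
        if (rcStrict == VINF_SUCCESS)
        {
            IEM_GET_CTX(pVCpu)->rip = uNewRip;  /* hypothetical consumer of the value */
            IEM_GET_CTX(pVCpu)->rsp = uNewRsp;  /* manual RSP commit */
        }
    }
    return rcStrict;
}
#endif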
10862
10863
10864/**
10865 * Fetches a system table byte.
10866 *
10867 * @returns Strict VBox status code.
10868 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10869 * @param pbDst Where to return the byte.
10870 * @param iSegReg The index of the segment register to use for
10871 * this access. The base and limits are checked.
10872 * @param GCPtrMem The address of the guest memory.
10873 */
10874IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10875{
10876 /* The lazy approach for now... */
10877 uint8_t const *pbSrc;
10878 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10879 if (rc == VINF_SUCCESS)
10880 {
10881 *pbDst = *pbSrc;
10882 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10883 }
10884 return rc;
10885}
10886
10887
10888/**
10889 * Fetches a system table word.
10890 *
10891 * @returns Strict VBox status code.
10892 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10893 * @param pu16Dst Where to return the word.
10894 * @param iSegReg The index of the segment register to use for
10895 * this access. The base and limits are checked.
10896 * @param GCPtrMem The address of the guest memory.
10897 */
10898IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10899{
10900 /* The lazy approach for now... */
10901 uint16_t const *pu16Src;
10902 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10903 if (rc == VINF_SUCCESS)
10904 {
10905 *pu16Dst = *pu16Src;
10906 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10907 }
10908 return rc;
10909}
10910
10911
10912/**
10913 * Fetches a system table dword.
10914 *
10915 * @returns Strict VBox status code.
10916 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10917 * @param pu32Dst Where to return the dword.
10918 * @param iSegReg The index of the segment register to use for
10919 * this access. The base and limits are checked.
10920 * @param GCPtrMem The address of the guest memory.
10921 */
10922IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10923{
10924 /* The lazy approach for now... */
10925 uint32_t const *pu32Src;
10926 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10927 if (rc == VINF_SUCCESS)
10928 {
10929 *pu32Dst = *pu32Src;
10930 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10931 }
10932 return rc;
10933}
10934
10935
10936/**
10937 * Fetches a system table qword.
10938 *
10939 * @returns Strict VBox status code.
10940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10941 * @param pu64Dst Where to return the qword.
10942 * @param iSegReg The index of the segment register to use for
10943 * this access. The base and limits are checked.
10944 * @param GCPtrMem The address of the guest memory.
10945 */
10946IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10947{
10948 /* The lazy approach for now... */
10949 uint64_t const *pu64Src;
10950 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10951 if (rc == VINF_SUCCESS)
10952 {
10953 *pu64Dst = *pu64Src;
10954 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10955 }
10956 return rc;
10957}
10958
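/*
 * Illustrative sketch only: fetching the low half of a hypothetical IDT entry with
 * one of the helpers above, passing UINT8_MAX as the segment index for a flat
 * system access just like the descriptor fetch code below does.  The vector number
 * is made up for the example.
 *
 *      uint32_t     u32GateLo;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSysU32(pVCpu, &u32GateLo, UINT8_MAX,
 *                                                IEM_GET_CTX(pVCpu)->idtr.pIdt + UINT32_C(0x20) * 8);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */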
10959
10960/**
10961 * Fetches a descriptor table entry with caller specified error code.
10962 *
10963 * @returns Strict VBox status code.
10964 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10965 * @param pDesc Where to return the descriptor table entry.
10966 * @param uSel The selector which table entry to fetch.
10967 * @param uXcpt The exception to raise on table lookup error.
10968 * @param uErrorCode The error code associated with the exception.
10969 */
10970IEM_STATIC VBOXSTRICTRC
10971iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10972{
10973 AssertPtr(pDesc);
10974 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10975
10976 /** @todo did the 286 require all 8 bytes to be accessible? */
10977 /*
10978 * Get the selector table base and check bounds.
10979 */
10980 RTGCPTR GCPtrBase;
10981 if (uSel & X86_SEL_LDT)
10982 {
10983 if ( !pCtx->ldtr.Attr.n.u1Present
10984 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10985 {
10986 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10987 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10988 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10989 uErrorCode, 0);
10990 }
10991
10992 Assert(pCtx->ldtr.Attr.n.u1Present);
10993 GCPtrBase = pCtx->ldtr.u64Base;
10994 }
10995 else
10996 {
10997 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10998 {
10999 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
11000 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
11001 uErrorCode, 0);
11002 }
11003 GCPtrBase = pCtx->gdtr.pGdt;
11004 }
11005
11006 /*
11007 * Read the legacy descriptor and maybe the long mode extensions if
11008 * required.
11009 */
11010 VBOXSTRICTRC rcStrict;
11011 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
11012 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
11013 else
11014 {
11015 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
11016 if (rcStrict == VINF_SUCCESS)
11017 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
11018 if (rcStrict == VINF_SUCCESS)
11019 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
11020 if (rcStrict == VINF_SUCCESS)
11021 pDesc->Legacy.au16[3] = 0;
11022 else
11023 return rcStrict;
11024 }
11025
11026 if (rcStrict == VINF_SUCCESS)
11027 {
11028 if ( !IEM_IS_LONG_MODE(pVCpu)
11029 || pDesc->Legacy.Gen.u1DescType)
11030 pDesc->Long.au64[1] = 0;
11031 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
11032 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
11033 else
11034 {
11035 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
11036 /** @todo is this the right exception? */
11037 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
11038 }
11039 }
11040 return rcStrict;
11041}
11042
11043
11044/**
11045 * Fetches a descriptor table entry.
11046 *
11047 * @returns Strict VBox status code.
11048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11049 * @param pDesc Where to return the descriptor table entry.
11050 * @param uSel The selector which table entry to fetch.
11051 * @param uXcpt The exception to raise on table lookup error.
11052 */
11053IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11054{
11055 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11056}
11057
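/*
 * Illustrative sketch only: how a selector load path would typically use the fetch
 * above, with uNewSel standing in for whatever selector the caller is loading.
 *
 *      IEMSELDESC   Desc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewSel, X86_XCPT_GP);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      if (!Desc.Legacy.Gen.u1Present)
 *      {
 *          ...raise the not-present fault appropriate for the register being loaded...
 *      }
 */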
11058
11059/**
11060 * Fakes a long mode stack selector for SS = 0.
11061 *
11062 * @param pDescSs Where to return the fake stack descriptor.
11063 * @param uDpl The DPL we want.
11064 */
11065IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11066{
11067 pDescSs->Long.au64[0] = 0;
11068 pDescSs->Long.au64[1] = 0;
11069 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11070 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11071 pDescSs->Long.Gen.u2Dpl = uDpl;
11072 pDescSs->Long.Gen.u1Present = 1;
11073 pDescSs->Long.Gen.u1Long = 1;
11074}
11075
11076
11077/**
11078 * Marks the selector descriptor as accessed (only non-system descriptors).
11079 *
 11080 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
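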
11081 * will therefore skip the limit checks.
11082 *
11083 * @returns Strict VBox status code.
11084 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11085 * @param uSel The selector.
11086 */
11087IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11088{
11089 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11090
11091 /*
11092 * Get the selector table base and calculate the entry address.
11093 */
11094 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11095 ? pCtx->ldtr.u64Base
11096 : pCtx->gdtr.pGdt;
11097 GCPtr += uSel & X86_SEL_MASK;
11098
11099 /*
11100 * ASMAtomicBitSet will assert if the address is misaligned, so do some
 11101 * ugly stuff to avoid this.  This makes sure the access is atomic and more
 11102 * or less removes any question about 8-bit vs 32-bit accesses.
11103 */
11104 VBOXSTRICTRC rcStrict;
11105 uint32_t volatile *pu32;
11106 if ((GCPtr & 3) == 0)
11107 {
 11108 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
11109 GCPtr += 2 + 2;
11110 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11111 if (rcStrict != VINF_SUCCESS)
11112 return rcStrict;
 11113 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11114 }
11115 else
11116 {
11117 /* The misaligned GDT/LDT case, map the whole thing. */
11118 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11119 if (rcStrict != VINF_SUCCESS)
11120 return rcStrict;
11121 switch ((uintptr_t)pu32 & 3)
11122 {
11123 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11124 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11125 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11126 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11127 }
11128 }
11129
11130 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11131}
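/*
 * Illustrative sketch only: the usual fetch-validate-mark sequence, mirroring how
 * segment loading code is expected to call iemMemMarkSelDescAccessed() after a
 * successful iemMemFetchSelDesc() on the same selector (Desc, uSel and rcStrict are
 * assumed from the surrounding caller).
 *
 *      if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
 *      {
 *          rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
 *          if (rcStrict != VINF_SUCCESS)
 *              return rcStrict;
 *          Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
 *      }
 */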
11132
11133/** @} */
11134
11135
11136/*
11137 * Include the C/C++ implementation of instruction.
11138 */
11139#include "IEMAllCImpl.cpp.h"
11140
11141
11142
11143/** @name "Microcode" macros.
11144 *
 11145 * The idea is that we should be able to use the same code to interpret
 11146 * instructions as well as to recompile them.  Thus this obfuscation.
11147 *
11148 * @{
11149 */
11150#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11151#define IEM_MC_END() }
11152#define IEM_MC_PAUSE() do {} while (0)
11153#define IEM_MC_CONTINUE() do {} while (0)
11154
11155/** Internal macro. */
11156#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11157 do \
11158 { \
11159 VBOXSTRICTRC rcStrict2 = a_Expr; \
11160 if (rcStrict2 != VINF_SUCCESS) \
11161 return rcStrict2; \
11162 } while (0)
11163
11164
11165#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11166#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11167#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11168#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11169#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11170#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11171#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11172#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11173#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11174 do { \
11175 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11176 return iemRaiseDeviceNotAvailable(pVCpu); \
11177 } while (0)
11178#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11179 do { \
11180 if (((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11181 return iemRaiseDeviceNotAvailable(pVCpu); \
11182 } while (0)
11183#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11184 do { \
11185 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11186 return iemRaiseMathFault(pVCpu); \
11187 } while (0)
11188#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11189 do { \
11190 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11191 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11192 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11193 return iemRaiseUndefinedOpcode(pVCpu); \
11194 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11195 return iemRaiseDeviceNotAvailable(pVCpu); \
11196 } while (0)
11197#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11198 do { \
11199 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11200 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11201 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11202 return iemRaiseUndefinedOpcode(pVCpu); \
11203 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11204 return iemRaiseDeviceNotAvailable(pVCpu); \
11205 } while (0)
11206#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11207 do { \
11208 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11209 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11210 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11211 return iemRaiseUndefinedOpcode(pVCpu); \
11212 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11213 return iemRaiseDeviceNotAvailable(pVCpu); \
11214 } while (0)
11215#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11216 do { \
11217 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11218 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11219 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11220 return iemRaiseUndefinedOpcode(pVCpu); \
11221 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11222 return iemRaiseDeviceNotAvailable(pVCpu); \
11223 } while (0)
11224#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11225 do { \
11226 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11227 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11228 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11229 return iemRaiseUndefinedOpcode(pVCpu); \
11230 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11231 return iemRaiseDeviceNotAvailable(pVCpu); \
11232 } while (0)
11233#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11234 do { \
11235 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11236 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11237 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11238 return iemRaiseUndefinedOpcode(pVCpu); \
11239 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11240 return iemRaiseDeviceNotAvailable(pVCpu); \
11241 } while (0)
11242#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11243 do { \
11244 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11245 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11246 return iemRaiseUndefinedOpcode(pVCpu); \
11247 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11248 return iemRaiseDeviceNotAvailable(pVCpu); \
11249 } while (0)
11250#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11251 do { \
11252 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11253 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11254 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11255 return iemRaiseUndefinedOpcode(pVCpu); \
11256 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11257 return iemRaiseDeviceNotAvailable(pVCpu); \
11258 } while (0)
11259#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11260 do { \
11261 if (pVCpu->iem.s.uCpl != 0) \
11262 return iemRaiseGeneralProtectionFault0(pVCpu); \
11263 } while (0)
11264#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11265 do { \
11266 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11267 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11268 } while (0)
11269#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11270 do { \
11271 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11272 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11273 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_FSGSBASE)) \
11274 return iemRaiseUndefinedOpcode(pVCpu); \
11275 } while (0)
11276
11277
11278#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11279#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11280#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11281#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11282#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11283#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11284#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11285 uint32_t a_Name; \
11286 uint32_t *a_pName = &a_Name
11287#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11288 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
11289
11290#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11291#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11292
11293#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11294#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11295#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11296#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11297#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11298#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11299#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11300#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11301#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11302#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11303#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11304#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11305#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11306#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11307#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11308#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11309#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11310#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11311#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11312#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11313#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg));
11314#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg));
11315#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11316#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11317#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11318#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11319#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11320#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11321#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11322#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11323#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11324/** @note Not for IOPL or IF testing or modification. */
11325#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11326#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11327#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
11328#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
11329
11330#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11331#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11332#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11333#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11334#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11335#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11336#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11337#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11338#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11339#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11340#define IEM_MC_STORE_SREG_BASE_U64(a_iSeg, a_u64Value) *iemSRegBaseRefU64(pVCpu, (a_iSeg)) = (a_u64Value)
11341#define IEM_MC_STORE_SREG_BASE_U32(a_iSeg, a_u32Value) *iemSRegBaseRefU64(pVCpu, (a_iSeg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11342#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11343 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11344
11345
11346#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11347#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11348/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11349 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11350#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11351#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11352/** @note Not for IOPL or IF testing or modification. */
11353#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11354
11355#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11356#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11357#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11358 do { \
11359 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11360 *pu32Reg += (a_u32Value); \
 11361 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11362 } while (0)
11363#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11364
11365#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11366#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11367#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11368 do { \
11369 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11370 *pu32Reg -= (a_u32Value); \
 11371 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11372 } while (0)
11373#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11374#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11375
11376#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11377#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11378#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11379#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11380#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11381#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11382#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11383
11384#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11385#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11386#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11387#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11388
11389#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11390#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11391#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11392
11393#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11394#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11395#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11396
11397#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11398#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11399#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11400
11401#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11402#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11403#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11404
11405#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11406
11407#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11408
11409#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11410#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11411#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11412 do { \
11413 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11414 *pu32Reg &= (a_u32Value); \
 11415 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11416 } while (0)
11417#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11418
11419#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11420#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11421#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11422 do { \
11423 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11424 *pu32Reg |= (a_u32Value); \
 11425 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11426 } while (0)
11427#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11428
11429
11430/** @note Not for IOPL or IF modification. */
11431#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
11432/** @note Not for IOPL or IF modification. */
11433#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
11434/** @note Not for IOPL or IF modification. */
11435#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
11436
11437#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11438
11439/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11440#define IEM_MC_FPU_TO_MMX_MODE() do { \
11441 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11442 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0xff; \
11443 } while (0)
11444
11445/** Switches the FPU state from MMX mode (FTW=0xffff). */
11446#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11447 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0; \
11448 } while (0)
11449
11450#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11451 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11452#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11453 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11454#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11455 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11456 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11457 } while (0)
11458#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11459 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11460 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11461 } while (0)
11462#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11463 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11464#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11465 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11466#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11467 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11468
11469#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11470 do { (a_u128Value).au64[0] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11471 (a_u128Value).au64[1] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11472 } while (0)
11473#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11474 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11475#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11476 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11477#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11478 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11479#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11480 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11481 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11482 } while (0)
11483#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11484 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11485#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11486 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11487 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11488 } while (0)
11489#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11490 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11491#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11492 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11493 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11494 } while (0)
11495#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11496 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11497#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11498 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11499#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11500 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11501#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11502 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11503#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11504 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11505 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11506 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11507 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11508 } while (0)
11509
11510#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11511 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11512 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11513 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11514 } while (0)
11515#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11516 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11517 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11518 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11519 } while (0)
11520#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11521 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11522 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11523 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11524 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11525 } while (0)
11526#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11527 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11528 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11529 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11530 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11531 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11532 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11533 } while (0)
11534
11535#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11536#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11537 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11538 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11539 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11540 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11541 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11542 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11543 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11544 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11545 } while (0)
11546#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11547 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11548 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11549 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11550 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11551 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11552 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11553 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11554 } while (0)
11555#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11556 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11557 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11558 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11559 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11560 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11561 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11562 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11563 } while (0)
11564#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11565 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11566 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11567 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11568 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11569 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11570 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11571 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11572 } while (0)
11573
11574#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11575 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11576#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11577 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11578#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11579 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11580#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11581 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11582 uintptr_t const iYRegTmp = (a_iYReg); \
11583 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11584 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11585 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11586 } while (0)
11587
11588#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11589 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11590 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11591 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11592 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11593 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11594 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11595 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11596 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11597 } while (0)
11598#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11599 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11600 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11601 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11602 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11603 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11604 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11605 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11606 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11607 } while (0)
11608#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11609 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11610 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11611 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11612 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11613 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11614 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11615 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11616 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11617 } while (0)
11618
11619#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11620 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11621 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11622 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11623 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11624 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11625 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11626 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11627 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11628 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11629 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11630 } while (0)
11631#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11632 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11633 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11634 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11635 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11636 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11637 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11638 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11639 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11640 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11641 } while (0)
11642#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11643 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11644 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11645 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11646 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11647 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11648 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11649 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11650 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11651 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11652 } while (0)
11653#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11654 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11655 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11656 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11657 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11658 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11659 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11660 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11661 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11662 } while (0)
11663
11664#ifndef IEM_WITH_SETJMP
11665# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11666 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11667# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11668 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11669# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11670 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11671#else
11672# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11673 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11674# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11675 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11676# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11677 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11678#endif
11679
11680#ifndef IEM_WITH_SETJMP
11681# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11682 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11683# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11684 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11685# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11686 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11687#else
11688# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11689 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11690# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11691 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11692# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11693 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11694#endif
11695
11696#ifndef IEM_WITH_SETJMP
11697# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11698 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11699# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11700 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11701# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11702 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11703#else
11704# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11705 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11706# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11707 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11708# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11709 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11710#endif
11711
11712#ifdef SOME_UNUSED_FUNCTION
11713# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11714 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11715#endif
11716
11717#ifndef IEM_WITH_SETJMP
11718# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11719 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11720# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11721 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11722# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11723 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11724# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11725 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11726#else
11727# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11728 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11729# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11730 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11731# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11732 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11733# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11734 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11735#endif
11736
11737#ifndef IEM_WITH_SETJMP
11738# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11739 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11740# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11741 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11742# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11743 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11744#else
11745# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11746 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11747# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11748 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11749# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11750 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11751#endif
11752
11753#ifndef IEM_WITH_SETJMP
11754# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11755 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11756# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11757 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11758#else
11759# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11760 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11761# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11762 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11763#endif
11764
11765#ifndef IEM_WITH_SETJMP
11766# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11767 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11768# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11769 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11770#else
11771# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11772 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11773# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11774 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11775#endif
11776
11777
11778
11779#ifndef IEM_WITH_SETJMP
11780# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11781 do { \
11782 uint8_t u8Tmp; \
11783 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11784 (a_u16Dst) = u8Tmp; \
11785 } while (0)
11786# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11787 do { \
11788 uint8_t u8Tmp; \
11789 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11790 (a_u32Dst) = u8Tmp; \
11791 } while (0)
11792# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11793 do { \
11794 uint8_t u8Tmp; \
11795 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11796 (a_u64Dst) = u8Tmp; \
11797 } while (0)
11798# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11799 do { \
11800 uint16_t u16Tmp; \
11801 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11802 (a_u32Dst) = u16Tmp; \
11803 } while (0)
11804# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11805 do { \
11806 uint16_t u16Tmp; \
11807 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11808 (a_u64Dst) = u16Tmp; \
11809 } while (0)
11810# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11811 do { \
11812 uint32_t u32Tmp; \
11813 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11814 (a_u64Dst) = u32Tmp; \
11815 } while (0)
11816#else /* IEM_WITH_SETJMP */
11817# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11818 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11819# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11820 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11821# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11822 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11823# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11824 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11825# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11826 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11827# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11828 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11829#endif /* IEM_WITH_SETJMP */
11830
11831#ifndef IEM_WITH_SETJMP
11832# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11833 do { \
11834 uint8_t u8Tmp; \
11835 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11836 (a_u16Dst) = (int8_t)u8Tmp; \
11837 } while (0)
11838# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11839 do { \
11840 uint8_t u8Tmp; \
11841 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11842 (a_u32Dst) = (int8_t)u8Tmp; \
11843 } while (0)
11844# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11845 do { \
11846 uint8_t u8Tmp; \
11847 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11848 (a_u64Dst) = (int8_t)u8Tmp; \
11849 } while (0)
11850# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11851 do { \
11852 uint16_t u16Tmp; \
11853 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11854 (a_u32Dst) = (int16_t)u16Tmp; \
11855 } while (0)
11856# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11857 do { \
11858 uint16_t u16Tmp; \
11859 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11860 (a_u64Dst) = (int16_t)u16Tmp; \
11861 } while (0)
11862# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11863 do { \
11864 uint32_t u32Tmp; \
11865 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11866 (a_u64Dst) = (int32_t)u32Tmp; \
11867 } while (0)
11868#else /* IEM_WITH_SETJMP */
11869# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11870 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11871# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11872 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11873# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11874 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11875# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11876 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11877# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11878 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11879# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11880 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11881#endif /* IEM_WITH_SETJMP */
11882
11883#ifndef IEM_WITH_SETJMP
11884# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11885 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11886# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11887 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11888# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11889 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11890# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11891 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11892#else
11893# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11894 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11895# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11896 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11897# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11898 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11899# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11900 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11901#endif
11902
11903#ifndef IEM_WITH_SETJMP
11904# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11905 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11906# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11907 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11908# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11909 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11910# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11911 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11912#else
11913# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11914 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11915# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11916 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11917# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11918 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11919# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11920 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11921#endif
11922
11923#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11924#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11925#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11926#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11927#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11928#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11929#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11930 do { \
11931 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11932 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11933 } while (0)
11934
11935#ifndef IEM_WITH_SETJMP
11936# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11937 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11938# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11939 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11940#else
11941# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11942 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11943# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11944 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11945#endif
11946
11947#ifndef IEM_WITH_SETJMP
11948# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11949 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11950# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11951 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11952#else
11953# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11954 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11955# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11956 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11957#endif
11958
11959
11960#define IEM_MC_PUSH_U16(a_u16Value) \
11961 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11962#define IEM_MC_PUSH_U32(a_u32Value) \
11963 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11964#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11965 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11966#define IEM_MC_PUSH_U64(a_u64Value) \
11967 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11968
11969#define IEM_MC_POP_U16(a_pu16Value) \
11970 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11971#define IEM_MC_POP_U32(a_pu32Value) \
11972 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11973#define IEM_MC_POP_U64(a_pu64Value) \
11974 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
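/*
 * Illustrative sketch, not part of the original source: a typical microcode
 * block pushing a 16-bit register via the stack helpers above.  The
 * IEM_MC_BEGIN/IEM_MC_END bracketing is the one referred to by the CIMPL
 * macros further down; IEM_MC_LOCAL, IEM_MC_FETCH_GREG_U16 and
 * IEM_MC_ADVANCE_RIP are assumed from elsewhere in the IEM sources and are
 * shown here only for context.
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *      IEM_MC_PUSH_U16(u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */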
11975
11976/** Maps guest memory for direct or bounce buffered access.
11977 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11978 * @remarks May return.
11979 */
11980#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11981 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11982
11983/** Maps guest memory for direct or bounce buffered access.
11984 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11985 * @remarks May return.
11986 */
11987#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11988 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11989
11990/** Commits the memory and unmaps the guest memory.
11991 * @remarks May return.
11992 */
11993#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11994 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11995
11996/** Commits the memory and unmaps the guest memory unless the FPU status word
11997 * (@a a_u16FSW), filtered by the FPU control word's exception masks, indicates a
11998 * pending unmasked exception that would prevent the FPU store from taking place.
11999 *
12000 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
12001 * store, while \#P will not.
12002 *
12003 * @remarks May in theory return - for now.
12004 */
12005#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
12006 do { \
12007 if ( !(a_u16FSW & X86_FSW_ES) \
12008 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
12009 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
12010 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
12011 } while (0)
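/*
 * Illustrative sketch, not part of the original source: the usual
 * map/modify/commit pattern for a read-modify-write memory operand.  The
 * GCPtrEffDst local and the worker step in the middle are placeholders;
 * IEM_ACCESS_DATA_RW is assumed to be the read-write data access flag used
 * elsewhere in IEM.
 *
 *      uint32_t *pu32Dst;
 *      IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *      ...modify *pu32Dst via an assembly or C worker...
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
 */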
12012
12013/** Calculate efficient address from R/M. */
12014#ifndef IEM_WITH_SETJMP
12015# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12016 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
12017#else
12018# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12019 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
12020#endif
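/*
 * Illustrative sketch, not part of the original source: decoding a memory
 * destination operand.  GCPtrEffDst and u32Value are hypothetical locals;
 * bRm is the ModR/M byte and the third argument is the number of immediate
 * bytes following the effective address encoding (0 here).
 *
 *      RTGCPTR GCPtrEffDst;
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *      IEM_MC_STORE_MEM_U32(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u32Value);
 */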
12021
12022#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
12023#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
12024#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
12025#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
12026#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
12027#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
12028#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
12029
12030/**
12031 * Defers the rest of the instruction emulation to a C implementation routine
12032 * and returns, only taking the standard parameters.
12033 *
12034 * @param a_pfnCImpl The pointer to the C routine.
12035 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12036 */
12037#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12038
12039/**
12040 * Defers the rest of instruction emulation to a C implementation routine and
12041 * returns, taking one argument in addition to the standard ones.
12042 *
12043 * @param a_pfnCImpl The pointer to the C routine.
12044 * @param a0 The argument.
12045 */
12046#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12047
12048/**
12049 * Defers the rest of the instruction emulation to a C implementation routine
12050 * and returns, taking two arguments in addition to the standard ones.
12051 *
12052 * @param a_pfnCImpl The pointer to the C routine.
12053 * @param a0 The first extra argument.
12054 * @param a1 The second extra argument.
12055 */
12056#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12057
12058/**
12059 * Defers the rest of the instruction emulation to a C implementation routine
12060 * and returns, taking three arguments in addition to the standard ones.
12061 *
12062 * @param a_pfnCImpl The pointer to the C routine.
12063 * @param a0 The first extra argument.
12064 * @param a1 The second extra argument.
12065 * @param a2 The third extra argument.
12066 */
12067#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12068
12069/**
12070 * Defers the rest of the instruction emulation to a C implementation routine
12071 * and returns, taking four arguments in addition to the standard ones.
12072 *
12073 * @param a_pfnCImpl The pointer to the C routine.
12074 * @param a0 The first extra argument.
12075 * @param a1 The second extra argument.
12076 * @param a2 The third extra argument.
12077 * @param a3 The fourth extra argument.
12078 */
12079#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12080
12081/**
12082 * Defers the rest of the instruction emulation to a C implementation routine
12083 * and returns, taking five arguments in addition to the standard ones.
12084 *
12085 * @param a_pfnCImpl The pointer to the C routine.
12086 * @param a0 The first extra argument.
12087 * @param a1 The second extra argument.
12088 * @param a2 The third extra argument.
12089 * @param a3 The fourth extra argument.
12090 * @param a4 The fifth extra argument.
12091 */
12092#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
12093
12094/**
12095 * Defers the entire instruction emulation to a C implementation routine and
12096 * returns, only taking the standard parameters.
12097 *
12098 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12099 *
12100 * @param a_pfnCImpl The pointer to the C routine.
12101 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12102 */
12103#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12104
12105/**
12106 * Defers the entire instruction emulation to a C implementation routine and
12107 * returns, taking one argument in addition to the standard ones.
12108 *
12109 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12110 *
12111 * @param a_pfnCImpl The pointer to the C routine.
12112 * @param a0 The argument.
12113 */
12114#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12115
12116/**
12117 * Defers the entire instruction emulation to a C implementation routine and
12118 * returns, taking two arguments in addition to the standard ones.
12119 *
12120 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12121 *
12122 * @param a_pfnCImpl The pointer to the C routine.
12123 * @param a0 The first extra argument.
12124 * @param a1 The second extra argument.
12125 */
12126#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12127
12128/**
12129 * Defers the entire instruction emulation to a C implementation routine and
12130 * returns, taking three arguments in addition to the standard ones.
12131 *
12132 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12133 *
12134 * @param a_pfnCImpl The pointer to the C routine.
12135 * @param a0 The first extra argument.
12136 * @param a1 The second extra argument.
12137 * @param a2 The third extra argument.
12138 */
12139#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
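/*
 * Illustrative sketch, not part of the original source: an opcode decoder
 * body handing the entire instruction to a C implementation worker.  The
 * worker name iemCImpl_example is hypothetical; real callers pass routines
 * declared via IEM_CIMPL_DEF_0 and friends.
 *
 *      IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *      return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_example);
 */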
12140
12141/**
12142 * Calls an FPU assembly implementation taking one visible argument.
12143 *
12144 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12145 * @param a0 The first extra argument.
12146 */
12147#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12148 do { \
12149 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
12150 } while (0)
12151
12152/**
12153 * Calls an FPU assembly implementation taking two visible arguments.
12154 *
12155 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12156 * @param a0 The first extra argument.
12157 * @param a1 The second extra argument.
12158 */
12159#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12160 do { \
12161 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12162 } while (0)
12163
12164/**
12165 * Calls an FPU assembly implementation taking three visible arguments.
12166 *
12167 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12168 * @param a0 The first extra argument.
12169 * @param a1 The second extra argument.
12170 * @param a2 The third extra argument.
12171 */
12172#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12173 do { \
12174 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12175 } while (0)
12176
12177#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12178 do { \
12179 (a_FpuData).FSW = (a_FSW); \
12180 (a_FpuData).r80Result = *(a_pr80Value); \
12181 } while (0)
12182
12183/** Pushes FPU result onto the stack. */
12184#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12185 iemFpuPushResult(pVCpu, &a_FpuData)
12186/** Pushes FPU result onto the stack and sets the FPUDP. */
12187#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12188 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12189
12190/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
12191#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12192 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12193
12194/** Stores FPU result in a stack register. */
12195#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12196 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12197/** Stores FPU result in a stack register and pops the stack. */
12198#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12199 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12200/** Stores FPU result in a stack register and sets the FPUDP. */
12201#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12202 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12203/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12204 * stack. */
12205#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12206 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
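/*
 * Illustrative sketch, not part of the original source: the usual shape of a
 * two-operand FPU arithmetic instruction built from the helpers above - call
 * the assembly worker into a local IEMFPURESULT and store it back into ST0,
 * falling back to the stack underflow helper when a register is empty.  The
 * worker pointer and the IEM_MC_LOCAL declaration are assumptions shown only
 * for context.
 *
 *      IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, 1)
 *          IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, &FpuRes, pr80Value1, pr80Value2);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0);
 *      IEM_MC_ENDIF();
 */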
12207
12208/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12209#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12210 iemFpuUpdateOpcodeAndIp(pVCpu)
12211/** Free a stack register (for FFREE and FFREEP). */
12212#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12213 iemFpuStackFree(pVCpu, a_iStReg)
12214/** Increment the FPU stack pointer. */
12215#define IEM_MC_FPU_STACK_INC_TOP() \
12216 iemFpuStackIncTop(pVCpu)
12217/** Decrement the FPU stack pointer. */
12218#define IEM_MC_FPU_STACK_DEC_TOP() \
12219 iemFpuStackDecTop(pVCpu)
12220
12221/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12222#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12223 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12224/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12225#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12226 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12227/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12228#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12229 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12230/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12231#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12232 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12233/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12234 * stack. */
12235#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12236 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12237/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12238#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12239 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12240
12241/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12242#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12243 iemFpuStackUnderflow(pVCpu, a_iStDst)
12244/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12245 * stack. */
12246#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12247 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12248/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12249 * FPUDS. */
12250#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12251 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12252/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12253 * FPUDS. Pops stack. */
12254#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12255 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12256/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12257 * stack twice. */
12258#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12259 iemFpuStackUnderflowThenPopPop(pVCpu)
12260/** Raises a FPU stack underflow exception for an instruction pushing a result
12261 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12262#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12263 iemFpuStackPushUnderflow(pVCpu)
12264/** Raises a FPU stack underflow exception for an instruction pushing a result
12265 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12266#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12267 iemFpuStackPushUnderflowTwo(pVCpu)
12268
12269/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12270 * FPUIP, FPUCS and FOP. */
12271#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12272 iemFpuStackPushOverflow(pVCpu)
12273/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12274 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12275#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12276 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12277/** Prepares for using the FPU state.
12278 * Ensures that we can use the host FPU in the current context (RC+R0).
12279 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12280#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12281/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
12282#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12283/** Actualizes the guest FPU state so it can be accessed and modified. */
12284#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12285
12286/** Prepares for using the SSE state.
12287 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12288 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12289#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12290/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12291#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12292/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12293#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12294
12295/** Prepares for using the AVX state.
12296 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12297 * Ensures the guest AVX state in the CPUMCTX is up to date.
12298 * @note This will include the AVX512 state too when support for it is added
12299 * due to the zero-extending behaviour of VEX-encoded instructions. */
12300#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12301/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12302#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12303/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12304#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12305
12306/**
12307 * Calls an MMX assembly implementation taking two visible arguments.
12308 *
12309 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12310 * @param a0 The first extra argument.
12311 * @param a1 The second extra argument.
12312 */
12313#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12314 do { \
12315 IEM_MC_PREPARE_FPU_USAGE(); \
12316 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12317 } while (0)
12318
12319/**
12320 * Calls an MMX assembly implementation taking three visible arguments.
12321 *
12322 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12323 * @param a0 The first extra argument.
12324 * @param a1 The second extra argument.
12325 * @param a2 The third extra argument.
12326 */
12327#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12328 do { \
12329 IEM_MC_PREPARE_FPU_USAGE(); \
12330 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12331 } while (0)
12332
12333
12334/**
12335 * Calls an SSE assembly implementation taking two visible arguments.
12336 *
12337 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12338 * @param a0 The first extra argument.
12339 * @param a1 The second extra argument.
12340 */
12341#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12342 do { \
12343 IEM_MC_PREPARE_SSE_USAGE(); \
12344 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12345 } while (0)
12346
12347/**
12348 * Calls an SSE assembly implementation taking three visible arguments.
12349 *
12350 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12351 * @param a0 The first extra argument.
12352 * @param a1 The second extra argument.
12353 * @param a2 The third extra argument.
12354 */
12355#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12356 do { \
12357 IEM_MC_PREPARE_SSE_USAGE(); \
12358 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12359 } while (0)
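/*
 * Illustrative sketch, not part of the original source: a register-form SSE
 * instruction calling one of the workers above.  The worker pointer and the
 * register referencing step are assumptions shown only for context; note
 * that IEM_MC_PREPARE_SSE_USAGE is already folded into
 * IEM_MC_CALL_SSE_AIMPL_2.
 *
 *      IEM_MC_ARG(PRTUINT128U,  pDst, 0);
 *      IEM_MC_ARG(PCRTUINT128U, pSrc, 1);
 *      ...reference the destination and source XMM registers...
 *      IEM_MC_CALL_SSE_AIMPL_2(pfnAImpl, pDst, pSrc);
 */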
12360
12361
12362/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12363 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12364#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12365 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState), 0)
12366
12367/**
12368 * Calls an AVX assembly implementation taking two visible arguments.
12369 *
12370 * There is one implicit zeroth argument, a pointer to the extended state.
12371 *
12372 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12373 * @param a1 The first extra argument.
12374 * @param a2 The second extra argument.
12375 */
12376#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12377 do { \
12378 IEM_MC_PREPARE_AVX_USAGE(); \
12379 a_pfnAImpl(pXState, (a1), (a2)); \
12380 } while (0)
12381
12382/**
12383 * Calls an AVX assembly implementation taking three visible arguments.
12384 *
12385 * There is one implicit zeroth argument, a pointer to the extended state.
12386 *
12387 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12388 * @param a1 The first extra argument.
12389 * @param a2 The second extra argument.
12390 * @param a3 The third extra argument.
12391 */
12392#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12393 do { \
12394 IEM_MC_PREPARE_AVX_USAGE(); \
12395 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12396 } while (0)
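/*
 * Illustrative sketch, not part of the original source: how the implicit
 * extended-state argument combines with an AVX worker call.  The argument
 * types, the register referencing step and the worker pointer are
 * assumptions shown only for context.
 *
 *      IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *      IEM_MC_ARG(PRTUINT256U,  puDst, 1);
 *      IEM_MC_ARG(PCRTUINT256U, puSrc, 2);
 *      ...reference the destination and source YMM registers...
 *      IEM_MC_CALL_AVX_AIMPL_2(pfnAImpl, puDst, puSrc);
 */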
12397
12398/** @note Not for IOPL or IF testing. */
12399#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
12400/** @note Not for IOPL or IF testing. */
12401#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
12402/** @note Not for IOPL or IF testing. */
12403#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
12404/** @note Not for IOPL or IF testing. */
12405#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
12406/** @note Not for IOPL or IF testing. */
12407#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12408 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12409 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12410/** @note Not for IOPL or IF testing. */
12411#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12412 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12413 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12414/** @note Not for IOPL or IF testing. */
12415#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12416 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12417 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12418 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12419/** @note Not for IOPL or IF testing. */
12420#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12421 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12422 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12423 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12424#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
12425#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
12426#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
12427/** @note Not for IOPL or IF testing. */
12428#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12429 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12430 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12431/** @note Not for IOPL or IF testing. */
12432#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12433 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12434 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12435/** @note Not for IOPL or IF testing. */
12436#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12437 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12438 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12439/** @note Not for IOPL or IF testing. */
12440#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12441 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12442 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12443/** @note Not for IOPL or IF testing. */
12444#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12445 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12446 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12447/** @note Not for IOPL or IF testing. */
12448#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12449 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12450 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12451#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12452#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12453
12454#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12455 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12456#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12457 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12458#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12459 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12460#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12461 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12462#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12463 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12464#define IEM_MC_IF_FCW_IM() \
12465 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12466
12467#define IEM_MC_ELSE() } else {
12468#define IEM_MC_ENDIF() } do {} while (0)
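/*
 * Illustrative sketch, not part of the original source: the IEM_MC_IF_* /
 * IEM_MC_ELSE / IEM_MC_ENDIF macros open and close plain C blocks, so a
 * CMOVcc-style conditional reads like this.  The register fetch/store and
 * RIP advance macros are assumed from elsewhere in the IEM sources.
 *
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *          IEM_MC_FETCH_GREG_U32(u32Value, iRegSrc);
 *          IEM_MC_STORE_GREG_U32(iRegDst, u32Value);
 *      IEM_MC_ENDIF();
 *      IEM_MC_ADVANCE_RIP();
 */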
12469
12470/** @} */
12471
12472
12473/** @name Opcode Debug Helpers.
12474 * @{
12475 */
12476#ifdef VBOX_WITH_STATISTICS
12477# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12478#else
12479# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12480#endif
12481
12482#ifdef DEBUG
12483# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12484 do { \
12485 IEMOP_INC_STATS(a_Stats); \
12486 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
12487 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12488 } while (0)
12489
12490# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12491 do { \
12492 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12493 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12494 (void)RT_CONCAT(OP_,a_Upper); \
12495 (void)(a_fDisHints); \
12496 (void)(a_fIemHints); \
12497 } while (0)
12498
12499# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12500 do { \
12501 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12502 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12503 (void)RT_CONCAT(OP_,a_Upper); \
12504 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12505 (void)(a_fDisHints); \
12506 (void)(a_fIemHints); \
12507 } while (0)
12508
12509# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12510 do { \
12511 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12512 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12513 (void)RT_CONCAT(OP_,a_Upper); \
12514 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12515 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12516 (void)(a_fDisHints); \
12517 (void)(a_fIemHints); \
12518 } while (0)
12519
12520# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12521 do { \
12522 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12523 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12524 (void)RT_CONCAT(OP_,a_Upper); \
12525 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12526 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12527 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12528 (void)(a_fDisHints); \
12529 (void)(a_fIemHints); \
12530 } while (0)
12531
12532# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12533 do { \
12534 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12535 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12536 (void)RT_CONCAT(OP_,a_Upper); \
12537 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12538 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12539 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12540 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12541 (void)(a_fDisHints); \
12542 (void)(a_fIemHints); \
12543 } while (0)
12544
12545#else
12546# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12547
12548# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12549 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12550# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12551 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12552# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12553 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12554# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12555 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12556# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12557 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12558
12559#endif
12560
12561#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12562 IEMOP_MNEMONIC0EX(a_Lower, \
12563 #a_Lower, \
12564 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12565#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12566 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12567 #a_Lower " " #a_Op1, \
12568 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12569#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12570 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12571 #a_Lower " " #a_Op1 "," #a_Op2, \
12572 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12573#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12574 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12575 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12576 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12577#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12578 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12579 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12580 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
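/*
 * Illustrative sketch, not part of the original source: a mnemonic
 * annotation as it would appear at the top of an opcode decoder.  The form
 * and operand tokens shown are examples of the IEMOPFORM_/OP_/OP_PARM_
 * families referenced above and should be taken as illustrative rather than
 * authoritative for any particular instruction.
 *
 *      IEMOP_MNEMONIC2(RM, MOVUPS, movups, Vps, Wps, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZE);
 */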
12581
12582/** @} */
12583
12584
12585/** @name Opcode Helpers.
12586 * @{
12587 */
12588
12589#ifdef IN_RING3
12590# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12591 do { \
12592 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12593 else \
12594 { \
12595 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12596 return IEMOP_RAISE_INVALID_OPCODE(); \
12597 } \
12598 } while (0)
12599#else
12600# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12601 do { \
12602 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12603 else return IEMOP_RAISE_INVALID_OPCODE(); \
12604 } while (0)
12605#endif
12606
12607/** The instruction requires a 186 or later. */
12608#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12609# define IEMOP_HLP_MIN_186() do { } while (0)
12610#else
12611# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12612#endif
12613
12614/** The instruction requires a 286 or later. */
12615#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12616# define IEMOP_HLP_MIN_286() do { } while (0)
12617#else
12618# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12619#endif
12620
12621/** The instruction requires a 386 or later. */
12622#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12623# define IEMOP_HLP_MIN_386() do { } while (0)
12624#else
12625# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12626#endif
12627
12628/** The instruction requires a 386 or later if the given expression is true. */
12629#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12630# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12631#else
12632# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12633#endif
12634
12635/** The instruction requires a 486 or later. */
12636#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12637# define IEMOP_HLP_MIN_486() do { } while (0)
12638#else
12639# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12640#endif
12641
12642/** The instruction requires a Pentium (586) or later. */
12643#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12644# define IEMOP_HLP_MIN_586() do { } while (0)
12645#else
12646# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12647#endif
12648
12649/** The instruction requires a PentiumPro (686) or later. */
12650#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12651# define IEMOP_HLP_MIN_686() do { } while (0)
12652#else
12653# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12654#endif
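/*
 * Illustrative sketch, not part of the original source: the minimum-CPU
 * helpers are simply placed near the top of a decoder, before any operand
 * fetching, e.g. for a 386+ only encoding:
 *
 *      IEMOP_MNEMONIC(movzx_Gv_Eb, "movzx Gv,Eb");
 *      IEMOP_HLP_MIN_386();
 */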
12655
12656
12657/** The instruction raises an \#UD in real and V8086 mode. */
12658#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12659 do \
12660 { \
12661 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12662 else return IEMOP_RAISE_INVALID_OPCODE(); \
12663 } while (0)
12664
12665/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12666 * 64-bit mode. */
12667#define IEMOP_HLP_NO_64BIT() \
12668 do \
12669 { \
12670 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12671 return IEMOP_RAISE_INVALID_OPCODE(); \
12672 } while (0)
12673
12674/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12675 * 64-bit mode. */
12676#define IEMOP_HLP_ONLY_64BIT() \
12677 do \
12678 { \
12679 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12680 return IEMOP_RAISE_INVALID_OPCODE(); \
12681 } while (0)
12682
12683/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12684#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12685 do \
12686 { \
12687 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12688 iemRecalEffOpSize64Default(pVCpu); \
12689 } while (0)
12690
12691/** The instruction has 64-bit operand size if 64-bit mode. */
12692#define IEMOP_HLP_64BIT_OP_SIZE() \
12693 do \
12694 { \
12695 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12696 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12697 } while (0)
12698
12699/** Only a REX prefix immediately preceding the first opcode byte takes
12700 * effect. This macro helps ensure this and logs bad guest code. */
12701#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12702 do \
12703 { \
12704 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12705 { \
12706 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
12707 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
12708 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12709 pVCpu->iem.s.uRexB = 0; \
12710 pVCpu->iem.s.uRexIndex = 0; \
12711 pVCpu->iem.s.uRexReg = 0; \
12712 iemRecalEffOpSize(pVCpu); \
12713 } \
12714 } while (0)
12715
12716/**
12717 * Done decoding.
12718 */
12719#define IEMOP_HLP_DONE_DECODING() \
12720 do \
12721 { \
12722 /*nothing for now, maybe later... */ \
12723 } while (0)
12724
12725/**
12726 * Done decoding, raise \#UD exception if lock prefix present.
12727 */
12728#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12729 do \
12730 { \
12731 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12732 { /* likely */ } \
12733 else \
12734 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12735 } while (0)
12736
12737
12738/**
12739 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12740 * repnz or size prefixes are present, or if in real or v8086 mode.
12741 */
12742#define IEMOP_HLP_DONE_VEX_DECODING() \
12743 do \
12744 { \
12745 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12746 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12747 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12748 { /* likely */ } \
12749 else \
12750 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12751 } while (0)
12752
12753/**
12754 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12755 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not zero.
12756 */
12757#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12758 do \
12759 { \
12760 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12761 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12762 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12763 && pVCpu->iem.s.uVexLength == 0)) \
12764 { /* likely */ } \
12765 else \
12766 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12767 } while (0)
12768
12769
12770/**
12771 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12772 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12773 * register 0, or if in real or v8086 mode.
12774 */
12775#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12776 do \
12777 { \
12778 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12779 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12780 && !pVCpu->iem.s.uVex3rdReg \
12781 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12782 { /* likely */ } \
12783 else \
12784 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12785 } while (0)
12786
12787/**
12788 * Done decoding VEX, no V, L=0.
12789 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12790 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12791 */
12792#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12793 do \
12794 { \
12795 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12796 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12797 && pVCpu->iem.s.uVexLength == 0 \
12798 && pVCpu->iem.s.uVex3rdReg == 0 \
12799 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12800 { /* likely */ } \
12801 else \
12802 return IEMOP_RAISE_INVALID_OPCODE(); \
12803 } while (0)
12804
12805#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12806 do \
12807 { \
12808 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12809 { /* likely */ } \
12810 else \
12811 { \
12812 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12813 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12814 } \
12815 } while (0)
12816#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12817 do \
12818 { \
12819 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12820 { /* likely */ } \
12821 else \
12822 { \
12823 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12824 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12825 } \
12826 } while (0)
12827
12828/**
12829 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12830 * are present.
12831 */
12832#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12833 do \
12834 { \
12835 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12836 { /* likely */ } \
12837 else \
12838 return IEMOP_RAISE_INVALID_OPCODE(); \
12839 } while (0)
12840
12841
12842#ifdef VBOX_WITH_NESTED_HWVIRT
12843/** Checks and handles the SVM nested-guest control & instruction intercepts. */
12844# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
12845 do \
12846 { \
12847 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
12848 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
12849 } while (0)
12850
12851/** Checks and handles an SVM nested-guest CR read intercept. */
12852# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \
12853 do \
12854 { \
12855 if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)) \
12856 IEM_RETURN_SVM_VMEXIT(a_pVCpu, SVM_EXIT_READ_CR0 + (a_uCr), a_uExitInfo1, a_uExitInfo2); \
12857 } while (0)
12858
12859#else /* !VBOX_WITH_NESTED_HWVIRT */
12860# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12861# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12862#endif /* !VBOX_WITH_NESTED_HWVIRT */
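/*
 * Illustrative sketch, not part of the original source: a control/instruction
 * intercept check as it would appear in a decoder or CImpl worker.  The
 * SVM_CTRL_INTERCEPT_RDTSC / SVM_EXIT_RDTSC constants are assumed from the
 * SVM headers and the zero exit-info values are placeholders.
 *
 *      IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RDTSC, SVM_EXIT_RDTSC, 0, 0);
 */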
12863
12864
12865/**
12866 * Calculates the effective address of a ModR/M memory operand.
12867 *
12868 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12869 *
12870 * @return Strict VBox status code.
12871 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12872 * @param bRm The ModRM byte.
12873 * @param cbImm The size of any immediate following the
12874 * effective address opcode bytes. Important for
12875 * RIP relative addressing.
12876 * @param pGCPtrEff Where to return the effective address.
12877 */
12878IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12879{
12880 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12881 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12882# define SET_SS_DEF() \
12883 do \
12884 { \
12885 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12886 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12887 } while (0)
12888
12889 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12890 {
12891/** @todo Check the effective address size crap! */
12892 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12893 {
12894 uint16_t u16EffAddr;
12895
12896 /* Handle the disp16 form with no registers first. */
12897 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12898 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12899 else
12900 {
12901 /* Get the displacement. */
12902 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12903 {
12904 case 0: u16EffAddr = 0; break;
12905 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12906 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12907 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12908 }
12909
12910 /* Add the base and index registers to the disp. */
12911 switch (bRm & X86_MODRM_RM_MASK)
12912 {
12913 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12914 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12915 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12916 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12917 case 4: u16EffAddr += pCtx->si; break;
12918 case 5: u16EffAddr += pCtx->di; break;
12919 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12920 case 7: u16EffAddr += pCtx->bx; break;
12921 }
12922 }
12923
12924 *pGCPtrEff = u16EffAddr;
12925 }
12926 else
12927 {
12928 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12929 uint32_t u32EffAddr;
12930
12931 /* Handle the disp32 form with no registers first. */
12932 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12933 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12934 else
12935 {
12936 /* Get the register (or SIB) value. */
12937 switch ((bRm & X86_MODRM_RM_MASK))
12938 {
12939 case 0: u32EffAddr = pCtx->eax; break;
12940 case 1: u32EffAddr = pCtx->ecx; break;
12941 case 2: u32EffAddr = pCtx->edx; break;
12942 case 3: u32EffAddr = pCtx->ebx; break;
12943 case 4: /* SIB */
12944 {
12945 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12946
12947 /* Get the index and scale it. */
12948 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12949 {
12950 case 0: u32EffAddr = pCtx->eax; break;
12951 case 1: u32EffAddr = pCtx->ecx; break;
12952 case 2: u32EffAddr = pCtx->edx; break;
12953 case 3: u32EffAddr = pCtx->ebx; break;
12954 case 4: u32EffAddr = 0; /*none */ break;
12955 case 5: u32EffAddr = pCtx->ebp; break;
12956 case 6: u32EffAddr = pCtx->esi; break;
12957 case 7: u32EffAddr = pCtx->edi; break;
12958 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12959 }
12960 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12961
12962 /* add base */
12963 switch (bSib & X86_SIB_BASE_MASK)
12964 {
12965 case 0: u32EffAddr += pCtx->eax; break;
12966 case 1: u32EffAddr += pCtx->ecx; break;
12967 case 2: u32EffAddr += pCtx->edx; break;
12968 case 3: u32EffAddr += pCtx->ebx; break;
12969 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12970 case 5:
12971 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12972 {
12973 u32EffAddr += pCtx->ebp;
12974 SET_SS_DEF();
12975 }
12976 else
12977 {
12978 uint32_t u32Disp;
12979 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12980 u32EffAddr += u32Disp;
12981 }
12982 break;
12983 case 6: u32EffAddr += pCtx->esi; break;
12984 case 7: u32EffAddr += pCtx->edi; break;
12985 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12986 }
12987 break;
12988 }
12989 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12990 case 6: u32EffAddr = pCtx->esi; break;
12991 case 7: u32EffAddr = pCtx->edi; break;
12992 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12993 }
12994
12995 /* Get and add the displacement. */
12996 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12997 {
12998 case 0:
12999 break;
13000 case 1:
13001 {
13002 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13003 u32EffAddr += i8Disp;
13004 break;
13005 }
13006 case 2:
13007 {
13008 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13009 u32EffAddr += u32Disp;
13010 break;
13011 }
13012 default:
13013 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13014 }
13015
13016 }
13017 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13018 *pGCPtrEff = u32EffAddr;
13019 else
13020 {
13021 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13022 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13023 }
13024 }
13025 }
13026 else
13027 {
13028 uint64_t u64EffAddr;
13029
13030 /* Handle the rip+disp32 form with no registers first. */
13031 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13032 {
13033 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13034 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13035 }
13036 else
13037 {
13038 /* Get the register (or SIB) value. */
13039 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13040 {
13041 case 0: u64EffAddr = pCtx->rax; break;
13042 case 1: u64EffAddr = pCtx->rcx; break;
13043 case 2: u64EffAddr = pCtx->rdx; break;
13044 case 3: u64EffAddr = pCtx->rbx; break;
13045 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13046 case 6: u64EffAddr = pCtx->rsi; break;
13047 case 7: u64EffAddr = pCtx->rdi; break;
13048 case 8: u64EffAddr = pCtx->r8; break;
13049 case 9: u64EffAddr = pCtx->r9; break;
13050 case 10: u64EffAddr = pCtx->r10; break;
13051 case 11: u64EffAddr = pCtx->r11; break;
13052 case 13: u64EffAddr = pCtx->r13; break;
13053 case 14: u64EffAddr = pCtx->r14; break;
13054 case 15: u64EffAddr = pCtx->r15; break;
13055 /* SIB */
13056 case 4:
13057 case 12:
13058 {
13059 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13060
13061 /* Get the index and scale it. */
13062 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13063 {
13064 case 0: u64EffAddr = pCtx->rax; break;
13065 case 1: u64EffAddr = pCtx->rcx; break;
13066 case 2: u64EffAddr = pCtx->rdx; break;
13067 case 3: u64EffAddr = pCtx->rbx; break;
13068 case 4: u64EffAddr = 0; /*none */ break;
13069 case 5: u64EffAddr = pCtx->rbp; break;
13070 case 6: u64EffAddr = pCtx->rsi; break;
13071 case 7: u64EffAddr = pCtx->rdi; break;
13072 case 8: u64EffAddr = pCtx->r8; break;
13073 case 9: u64EffAddr = pCtx->r9; break;
13074 case 10: u64EffAddr = pCtx->r10; break;
13075 case 11: u64EffAddr = pCtx->r11; break;
13076 case 12: u64EffAddr = pCtx->r12; break;
13077 case 13: u64EffAddr = pCtx->r13; break;
13078 case 14: u64EffAddr = pCtx->r14; break;
13079 case 15: u64EffAddr = pCtx->r15; break;
13080 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13081 }
13082 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13083
13084 /* add base */
13085 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13086 {
13087 case 0: u64EffAddr += pCtx->rax; break;
13088 case 1: u64EffAddr += pCtx->rcx; break;
13089 case 2: u64EffAddr += pCtx->rdx; break;
13090 case 3: u64EffAddr += pCtx->rbx; break;
13091 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13092 case 6: u64EffAddr += pCtx->rsi; break;
13093 case 7: u64EffAddr += pCtx->rdi; break;
13094 case 8: u64EffAddr += pCtx->r8; break;
13095 case 9: u64EffAddr += pCtx->r9; break;
13096 case 10: u64EffAddr += pCtx->r10; break;
13097 case 11: u64EffAddr += pCtx->r11; break;
13098 case 12: u64EffAddr += pCtx->r12; break;
13099 case 14: u64EffAddr += pCtx->r14; break;
13100 case 15: u64EffAddr += pCtx->r15; break;
13101 /* complicated encodings */
13102 case 5:
13103 case 13:
13104 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13105 {
13106 if (!pVCpu->iem.s.uRexB)
13107 {
13108 u64EffAddr += pCtx->rbp;
13109 SET_SS_DEF();
13110 }
13111 else
13112 u64EffAddr += pCtx->r13;
13113 }
13114 else
13115 {
13116 uint32_t u32Disp;
13117 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13118 u64EffAddr += (int32_t)u32Disp;
13119 }
13120 break;
13121 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13122 }
13123 break;
13124 }
13125 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13126 }
13127
13128 /* Get and add the displacement. */
13129 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13130 {
13131 case 0:
13132 break;
13133 case 1:
13134 {
13135 int8_t i8Disp;
13136 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13137 u64EffAddr += i8Disp;
13138 break;
13139 }
13140 case 2:
13141 {
13142 uint32_t u32Disp;
13143 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13144 u64EffAddr += (int32_t)u32Disp;
13145 break;
13146 }
13147 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13148 }
13149
13150 }
13151
13152 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13153 *pGCPtrEff = u64EffAddr;
13154 else
13155 {
13156 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13157 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13158 }
13159 }
13160
13161 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13162 return VINF_SUCCESS;
13163}
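/*
 * Worked example, not part of the original source: with a 16-bit effective
 * address size and bRm = 0x47 (mod=01, rm=111), the code above fetches one
 * signed displacement byte and returns bx + disp8.  For bx=0x1000 and
 * disp8=0x10 that yields *pGCPtrEff = 0x1010, with the default segment left
 * as DS (no SET_SS_DEF).
 */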
13164
13165
13166/**
13167 * Calculates the effective address of a ModR/M memory operand.
13168 *
13169 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13170 *
13171 * @return Strict VBox status code.
13172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13173 * @param bRm The ModRM byte.
13174 * @param cbImm The size of any immediate following the
13175 * effective address opcode bytes. Important for
13176 * RIP relative addressing.
13177 * @param pGCPtrEff Where to return the effective address.
13178 * @param offRsp RSP displacement.
13179 */
13180IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13181{
13182 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13183 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13184# define SET_SS_DEF() \
13185 do \
13186 { \
13187 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13188 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13189 } while (0)
13190
13191 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13192 {
13193/** @todo Check the effective address size crap! */
13194 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13195 {
13196 uint16_t u16EffAddr;
13197
13198 /* Handle the disp16 form with no registers first. */
13199 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13200 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13201 else
13202 {
13203 /* Get the displacement. */
13204 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13205 {
13206 case 0: u16EffAddr = 0; break;
13207 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13208 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13209 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13210 }
13211
13212 /* Add the base and index registers to the disp. */
13213 switch (bRm & X86_MODRM_RM_MASK)
13214 {
13215 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13216 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13217 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13218 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13219 case 4: u16EffAddr += pCtx->si; break;
13220 case 5: u16EffAddr += pCtx->di; break;
13221 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13222 case 7: u16EffAddr += pCtx->bx; break;
13223 }
13224 }
13225
13226 *pGCPtrEff = u16EffAddr;
13227 }
13228 else
13229 {
13230 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13231 uint32_t u32EffAddr;
13232
13233 /* Handle the disp32 form with no registers first. */
13234 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13235 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13236 else
13237 {
13238 /* Get the register (or SIB) value. */
13239 switch ((bRm & X86_MODRM_RM_MASK))
13240 {
13241 case 0: u32EffAddr = pCtx->eax; break;
13242 case 1: u32EffAddr = pCtx->ecx; break;
13243 case 2: u32EffAddr = pCtx->edx; break;
13244 case 3: u32EffAddr = pCtx->ebx; break;
13245 case 4: /* SIB */
13246 {
13247 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13248
13249 /* Get the index and scale it. */
13250 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13251 {
13252 case 0: u32EffAddr = pCtx->eax; break;
13253 case 1: u32EffAddr = pCtx->ecx; break;
13254 case 2: u32EffAddr = pCtx->edx; break;
13255 case 3: u32EffAddr = pCtx->ebx; break;
13256 case 4: u32EffAddr = 0; /*none */ break;
13257 case 5: u32EffAddr = pCtx->ebp; break;
13258 case 6: u32EffAddr = pCtx->esi; break;
13259 case 7: u32EffAddr = pCtx->edi; break;
13260 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13261 }
13262 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13263
13264 /* add base */
13265 switch (bSib & X86_SIB_BASE_MASK)
13266 {
13267 case 0: u32EffAddr += pCtx->eax; break;
13268 case 1: u32EffAddr += pCtx->ecx; break;
13269 case 2: u32EffAddr += pCtx->edx; break;
13270 case 3: u32EffAddr += pCtx->ebx; break;
13271 case 4:
13272 u32EffAddr += pCtx->esp + offRsp;
13273 SET_SS_DEF();
13274 break;
13275 case 5:
13276 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13277 {
13278 u32EffAddr += pCtx->ebp;
13279 SET_SS_DEF();
13280 }
13281 else
13282 {
13283 uint32_t u32Disp;
13284 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13285 u32EffAddr += u32Disp;
13286 }
13287 break;
13288 case 6: u32EffAddr += pCtx->esi; break;
13289 case 7: u32EffAddr += pCtx->edi; break;
13290 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13291 }
13292 break;
13293 }
13294 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13295 case 6: u32EffAddr = pCtx->esi; break;
13296 case 7: u32EffAddr = pCtx->edi; break;
13297 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13298 }
13299
13300 /* Get and add the displacement. */
13301 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13302 {
13303 case 0:
13304 break;
13305 case 1:
13306 {
13307 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13308 u32EffAddr += i8Disp;
13309 break;
13310 }
13311 case 2:
13312 {
13313 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13314 u32EffAddr += u32Disp;
13315 break;
13316 }
13317 default:
13318 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13319 }
13320
13321 }
13322 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13323 *pGCPtrEff = u32EffAddr;
13324 else
13325 {
13326 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13327 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13328 }
13329 }
13330 }
13331 else
13332 {
13333 uint64_t u64EffAddr;
13334
13335 /* Handle the rip+disp32 form with no registers first. */
13336 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13337 {
13338 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13339 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13340 }
13341 else
13342 {
13343 /* Get the register (or SIB) value. */
13344 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13345 {
13346 case 0: u64EffAddr = pCtx->rax; break;
13347 case 1: u64EffAddr = pCtx->rcx; break;
13348 case 2: u64EffAddr = pCtx->rdx; break;
13349 case 3: u64EffAddr = pCtx->rbx; break;
13350 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13351 case 6: u64EffAddr = pCtx->rsi; break;
13352 case 7: u64EffAddr = pCtx->rdi; break;
13353 case 8: u64EffAddr = pCtx->r8; break;
13354 case 9: u64EffAddr = pCtx->r9; break;
13355 case 10: u64EffAddr = pCtx->r10; break;
13356 case 11: u64EffAddr = pCtx->r11; break;
13357 case 13: u64EffAddr = pCtx->r13; break;
13358 case 14: u64EffAddr = pCtx->r14; break;
13359 case 15: u64EffAddr = pCtx->r15; break;
13360 /* SIB */
13361 case 4:
13362 case 12:
13363 {
13364 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13365
13366 /* Get the index and scale it. */
13367 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13368 {
13369 case 0: u64EffAddr = pCtx->rax; break;
13370 case 1: u64EffAddr = pCtx->rcx; break;
13371 case 2: u64EffAddr = pCtx->rdx; break;
13372 case 3: u64EffAddr = pCtx->rbx; break;
13373 case 4: u64EffAddr = 0; /*none */ break;
13374 case 5: u64EffAddr = pCtx->rbp; break;
13375 case 6: u64EffAddr = pCtx->rsi; break;
13376 case 7: u64EffAddr = pCtx->rdi; break;
13377 case 8: u64EffAddr = pCtx->r8; break;
13378 case 9: u64EffAddr = pCtx->r9; break;
13379 case 10: u64EffAddr = pCtx->r10; break;
13380 case 11: u64EffAddr = pCtx->r11; break;
13381 case 12: u64EffAddr = pCtx->r12; break;
13382 case 13: u64EffAddr = pCtx->r13; break;
13383 case 14: u64EffAddr = pCtx->r14; break;
13384 case 15: u64EffAddr = pCtx->r15; break;
13385 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13386 }
13387 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13388
13389 /* add base */
13390 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13391 {
13392 case 0: u64EffAddr += pCtx->rax; break;
13393 case 1: u64EffAddr += pCtx->rcx; break;
13394 case 2: u64EffAddr += pCtx->rdx; break;
13395 case 3: u64EffAddr += pCtx->rbx; break;
13396 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
13397 case 6: u64EffAddr += pCtx->rsi; break;
13398 case 7: u64EffAddr += pCtx->rdi; break;
13399 case 8: u64EffAddr += pCtx->r8; break;
13400 case 9: u64EffAddr += pCtx->r9; break;
13401 case 10: u64EffAddr += pCtx->r10; break;
13402 case 11: u64EffAddr += pCtx->r11; break;
13403 case 12: u64EffAddr += pCtx->r12; break;
13404 case 14: u64EffAddr += pCtx->r14; break;
13405 case 15: u64EffAddr += pCtx->r15; break;
13406 /* complicated encodings */
13407 case 5:
13408 case 13:
13409 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13410 {
13411 if (!pVCpu->iem.s.uRexB)
13412 {
13413 u64EffAddr += pCtx->rbp;
13414 SET_SS_DEF();
13415 }
13416 else
13417 u64EffAddr += pCtx->r13;
13418 }
13419 else
13420 {
13421 uint32_t u32Disp;
13422 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13423 u64EffAddr += (int32_t)u32Disp;
13424 }
13425 break;
13426 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13427 }
13428 break;
13429 }
13430 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13431 }
13432
13433 /* Get and add the displacement. */
13434 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13435 {
13436 case 0:
13437 break;
13438 case 1:
13439 {
13440 int8_t i8Disp;
13441 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13442 u64EffAddr += i8Disp;
13443 break;
13444 }
13445 case 2:
13446 {
13447 uint32_t u32Disp;
13448 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13449 u64EffAddr += (int32_t)u32Disp;
13450 break;
13451 }
13452 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13453 }
13454
13455 }
13456
13457 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13458 *pGCPtrEff = u64EffAddr;
13459 else
13460 {
13461 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13462 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13463 }
13464 }
13465
13466 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13467 return VINF_SUCCESS;
13468}
13469
13470
13471#ifdef IEM_WITH_SETJMP
13472/**
13473 * Calculates the effective address of a ModR/M memory operand.
13474 *
13475 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13476 *
13477 * May longjmp on internal error.
13478 *
13479 * @return The effective address.
13480 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13481 * @param bRm The ModRM byte.
13482 * @param cbImm The size of any immediate following the
13483 * effective address opcode bytes. Important for
13484 * RIP relative addressing.
13485 */
13486IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13487{
13488 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13489 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13490# define SET_SS_DEF() \
13491 do \
13492 { \
13493 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13494 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13495 } while (0)
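 /*
 * Worked example for the 32-bit path below: the ModR/M byte 44h followed by
 * SIB 8Dh and disp8 08h encodes mod=01 rm=100 (SIB + disp8) with scale=4,
 * index=ecx and base=ebp, so the effective address comes out as
 * ebp + ecx*4 + 8 with SS as the default segment (the base being ebp).
 */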
13496
13497 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13498 {
13499/** @todo Check the effective address size crap! */
13500 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13501 {
13502 uint16_t u16EffAddr;
13503
13504 /* Handle the disp16 form with no registers first. */
13505 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13506 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13507 else
13508 {
 13509 /* Get the displacement. */
13510 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13511 {
13512 case 0: u16EffAddr = 0; break;
13513 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13514 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13515 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13516 }
13517
13518 /* Add the base and index registers to the disp. */
13519 switch (bRm & X86_MODRM_RM_MASK)
13520 {
13521 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13522 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13523 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13524 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13525 case 4: u16EffAddr += pCtx->si; break;
13526 case 5: u16EffAddr += pCtx->di; break;
13527 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13528 case 7: u16EffAddr += pCtx->bx; break;
13529 }
13530 }
13531
13532 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13533 return u16EffAddr;
13534 }
13535
13536 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13537 uint32_t u32EffAddr;
13538
13539 /* Handle the disp32 form with no registers first. */
13540 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13541 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13542 else
13543 {
13544 /* Get the register (or SIB) value. */
13545 switch ((bRm & X86_MODRM_RM_MASK))
13546 {
13547 case 0: u32EffAddr = pCtx->eax; break;
13548 case 1: u32EffAddr = pCtx->ecx; break;
13549 case 2: u32EffAddr = pCtx->edx; break;
13550 case 3: u32EffAddr = pCtx->ebx; break;
13551 case 4: /* SIB */
13552 {
13553 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13554
13555 /* Get the index and scale it. */
13556 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13557 {
13558 case 0: u32EffAddr = pCtx->eax; break;
13559 case 1: u32EffAddr = pCtx->ecx; break;
13560 case 2: u32EffAddr = pCtx->edx; break;
13561 case 3: u32EffAddr = pCtx->ebx; break;
13562 case 4: u32EffAddr = 0; /*none */ break;
13563 case 5: u32EffAddr = pCtx->ebp; break;
13564 case 6: u32EffAddr = pCtx->esi; break;
13565 case 7: u32EffAddr = pCtx->edi; break;
13566 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13567 }
13568 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13569
13570 /* add base */
13571 switch (bSib & X86_SIB_BASE_MASK)
13572 {
13573 case 0: u32EffAddr += pCtx->eax; break;
13574 case 1: u32EffAddr += pCtx->ecx; break;
13575 case 2: u32EffAddr += pCtx->edx; break;
13576 case 3: u32EffAddr += pCtx->ebx; break;
13577 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
13578 case 5:
13579 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13580 {
13581 u32EffAddr += pCtx->ebp;
13582 SET_SS_DEF();
13583 }
13584 else
13585 {
13586 uint32_t u32Disp;
13587 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13588 u32EffAddr += u32Disp;
13589 }
13590 break;
13591 case 6: u32EffAddr += pCtx->esi; break;
13592 case 7: u32EffAddr += pCtx->edi; break;
13593 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13594 }
13595 break;
13596 }
13597 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13598 case 6: u32EffAddr = pCtx->esi; break;
13599 case 7: u32EffAddr = pCtx->edi; break;
13600 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13601 }
13602
13603 /* Get and add the displacement. */
13604 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13605 {
13606 case 0:
13607 break;
13608 case 1:
13609 {
13610 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13611 u32EffAddr += i8Disp;
13612 break;
13613 }
13614 case 2:
13615 {
13616 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13617 u32EffAddr += u32Disp;
13618 break;
13619 }
13620 default:
13621 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13622 }
13623 }
13624
13625 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13626 {
13627 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13628 return u32EffAddr;
13629 }
13630 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13631 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13632 return u32EffAddr & UINT16_MAX;
13633 }
13634
13635 uint64_t u64EffAddr;
13636
13637 /* Handle the rip+disp32 form with no registers first. */
13638 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13639 {
13640 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13641 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13642 }
13643 else
13644 {
13645 /* Get the register (or SIB) value. */
13646 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13647 {
13648 case 0: u64EffAddr = pCtx->rax; break;
13649 case 1: u64EffAddr = pCtx->rcx; break;
13650 case 2: u64EffAddr = pCtx->rdx; break;
13651 case 3: u64EffAddr = pCtx->rbx; break;
13652 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13653 case 6: u64EffAddr = pCtx->rsi; break;
13654 case 7: u64EffAddr = pCtx->rdi; break;
13655 case 8: u64EffAddr = pCtx->r8; break;
13656 case 9: u64EffAddr = pCtx->r9; break;
13657 case 10: u64EffAddr = pCtx->r10; break;
13658 case 11: u64EffAddr = pCtx->r11; break;
13659 case 13: u64EffAddr = pCtx->r13; break;
13660 case 14: u64EffAddr = pCtx->r14; break;
13661 case 15: u64EffAddr = pCtx->r15; break;
13662 /* SIB */
13663 case 4:
13664 case 12:
13665 {
13666 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13667
13668 /* Get the index and scale it. */
13669 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13670 {
13671 case 0: u64EffAddr = pCtx->rax; break;
13672 case 1: u64EffAddr = pCtx->rcx; break;
13673 case 2: u64EffAddr = pCtx->rdx; break;
13674 case 3: u64EffAddr = pCtx->rbx; break;
13675 case 4: u64EffAddr = 0; /*none */ break;
13676 case 5: u64EffAddr = pCtx->rbp; break;
13677 case 6: u64EffAddr = pCtx->rsi; break;
13678 case 7: u64EffAddr = pCtx->rdi; break;
13679 case 8: u64EffAddr = pCtx->r8; break;
13680 case 9: u64EffAddr = pCtx->r9; break;
13681 case 10: u64EffAddr = pCtx->r10; break;
13682 case 11: u64EffAddr = pCtx->r11; break;
13683 case 12: u64EffAddr = pCtx->r12; break;
13684 case 13: u64EffAddr = pCtx->r13; break;
13685 case 14: u64EffAddr = pCtx->r14; break;
13686 case 15: u64EffAddr = pCtx->r15; break;
13687 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13688 }
13689 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13690
13691 /* add base */
13692 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13693 {
13694 case 0: u64EffAddr += pCtx->rax; break;
13695 case 1: u64EffAddr += pCtx->rcx; break;
13696 case 2: u64EffAddr += pCtx->rdx; break;
13697 case 3: u64EffAddr += pCtx->rbx; break;
13698 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13699 case 6: u64EffAddr += pCtx->rsi; break;
13700 case 7: u64EffAddr += pCtx->rdi; break;
13701 case 8: u64EffAddr += pCtx->r8; break;
13702 case 9: u64EffAddr += pCtx->r9; break;
13703 case 10: u64EffAddr += pCtx->r10; break;
13704 case 11: u64EffAddr += pCtx->r11; break;
13705 case 12: u64EffAddr += pCtx->r12; break;
13706 case 14: u64EffAddr += pCtx->r14; break;
13707 case 15: u64EffAddr += pCtx->r15; break;
13708 /* complicated encodings */
13709 case 5:
13710 case 13:
13711 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13712 {
13713 if (!pVCpu->iem.s.uRexB)
13714 {
13715 u64EffAddr += pCtx->rbp;
13716 SET_SS_DEF();
13717 }
13718 else
13719 u64EffAddr += pCtx->r13;
13720 }
13721 else
13722 {
13723 uint32_t u32Disp;
13724 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13725 u64EffAddr += (int32_t)u32Disp;
13726 }
13727 break;
13728 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13729 }
13730 break;
13731 }
13732 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13733 }
13734
13735 /* Get and add the displacement. */
13736 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13737 {
13738 case 0:
13739 break;
13740 case 1:
13741 {
13742 int8_t i8Disp;
13743 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13744 u64EffAddr += i8Disp;
13745 break;
13746 }
13747 case 2:
13748 {
13749 uint32_t u32Disp;
13750 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13751 u64EffAddr += (int32_t)u32Disp;
13752 break;
13753 }
13754 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13755 }
13756
13757 }
13758
13759 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13760 {
13761 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13762 return u64EffAddr;
13763 }
13764 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13765 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13766 return u64EffAddr & UINT32_MAX;
13767}
13768#endif /* IEM_WITH_SETJMP */
13769
13770
13771/** @} */
13772
13773
13774
13775/*
13776 * Include the instructions
13777 */
13778#include "IEMAllInstructions.cpp.h"
13779
13780
13781
13782
13783#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13784
13785/**
13786 * Sets up execution verification mode.
13787 */
13788IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
13789{
13791 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
13792
13793 /*
13794 * Always note down the address of the current instruction.
13795 */
13796 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
13797 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
13798
13799 /*
13800 * Enable verification and/or logging.
13801 */
 13802 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
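 /* fNoRem == true means no lockstep comparison; it is switched off (and thus
 verification switched on) either by enabling log level 6 for this group or
 by one of the hard-coded CS:RIP triggers in the #if 0 blocks below. */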
13803 if ( fNewNoRem
13804 && ( 0
13805#if 0 /* auto enable on first paged protected mode interrupt */
13806 || ( pOrgCtx->eflags.Bits.u1IF
13807 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
13808 && TRPMHasTrap(pVCpu)
13809 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
13810#endif
13811#if 0
 13812 || ( pOrgCtx->cs.Sel == 0x10
 13813 && ( pOrgCtx->rip == 0x90119e3e
 13814 || pOrgCtx->rip == 0x901d9810))
13815#endif
13816#if 0 /* Auto enable DSL - FPU stuff. */
 13817 || ( pOrgCtx->cs.Sel == 0x10
13818 && (// pOrgCtx->rip == 0xc02ec07f
13819 //|| pOrgCtx->rip == 0xc02ec082
13820 //|| pOrgCtx->rip == 0xc02ec0c9
13821 0
13822 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
13823#endif
13824#if 0 /* Auto enable DSL - fstp st0 stuff. */
 13825 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
13826#endif
13827#if 0
13828 || pOrgCtx->rip == 0x9022bb3a
13829#endif
13830#if 0
13831 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
13832#endif
13833#if 0
13834 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
13835 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
13836#endif
13837#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
13838 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
13839 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
13840 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
13841#endif
13842#if 0 /* NT4SP1 - xadd early boot. */
13843 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
13844#endif
13845#if 0 /* NT4SP1 - wrmsr (intel MSR). */
13846 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
13847#endif
13848#if 0 /* NT4SP1 - cmpxchg (AMD). */
13849 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
13850#endif
13851#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
13852 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
13853#endif
13854#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
13855 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
13856
13857#endif
13858#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
13859 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
13860
13861#endif
13862#if 0 /* NT4SP1 - frstor [ecx] */
13863 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
13864#endif
13865#if 0 /* xxxxxx - All long mode code. */
13866 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
13867#endif
13868#if 0 /* rep movsq linux 3.7 64-bit boot. */
13869 || (pOrgCtx->rip == 0x0000000000100241)
13870#endif
13871#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
13872 || (pOrgCtx->rip == 0x000000000215e240)
13873#endif
13874#if 0 /* DOS's size-overridden iret to v8086. */
13875 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
13876#endif
13877 )
13878 )
13879 {
13880 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
13881 RTLogFlags(NULL, "enabled");
13882 fNewNoRem = false;
13883 }
13884 if (fNewNoRem != pVCpu->iem.s.fNoRem)
13885 {
13886 pVCpu->iem.s.fNoRem = fNewNoRem;
13887 if (!fNewNoRem)
13888 {
13889 LogAlways(("Enabling verification mode!\n"));
13890 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
13891 }
13892 else
13893 LogAlways(("Disabling verification mode!\n"));
13894 }
13895
13896 /*
13897 * Switch state.
13898 */
13899 if (IEM_VERIFICATION_ENABLED(pVCpu))
13900 {
13901 static CPUMCTX s_DebugCtx; /* Ugly! */
13902
13903 s_DebugCtx = *pOrgCtx;
13904 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
13905 }
13906
13907 /*
13908 * See if there is an interrupt pending in TRPM and inject it if we can.
13909 */
13910 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13911 /** @todo Maybe someday we can centralize this under CPUMCanInjectInterrupt()? */
13912#if defined(VBOX_WITH_NESTED_HWVIRT)
13913 bool fIntrEnabled = pOrgCtx->hwvirt.svm.fGif;
13914 if (fIntrEnabled)
13915 {
 13916 if (CPUMIsGuestInSvmNestedHwVirtMode(pOrgCtx))
 13917 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pOrgCtx);
13918 else
13919 fIntrEnabled = pOrgCtx->eflags.Bits.u1IF;
13920 }
13921#else
13922 bool fIntrEnabled = pOrgCtx->eflags.Bits.u1IF;
13923#endif
13924 if ( fIntrEnabled
13925 && TRPMHasTrap(pVCpu)
13926 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
13927 {
13928 uint8_t u8TrapNo;
13929 TRPMEVENT enmType;
13930 RTGCUINT uErrCode;
13931 RTGCPTR uCr2;
13932 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13933 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13934 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13935 TRPMResetTrap(pVCpu);
13936 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
13937 }
13938
13939 /*
13940 * Reset the counters.
13941 */
13942 pVCpu->iem.s.cIOReads = 0;
13943 pVCpu->iem.s.cIOWrites = 0;
13944 pVCpu->iem.s.fIgnoreRaxRdx = false;
13945 pVCpu->iem.s.fOverlappingMovs = false;
13946 pVCpu->iem.s.fProblematicMemory = false;
13947 pVCpu->iem.s.fUndefinedEFlags = 0;
13948
13949 if (IEM_VERIFICATION_ENABLED(pVCpu))
13950 {
13951 /*
13952 * Free all verification records.
13953 */
13954 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
13955 pVCpu->iem.s.pIemEvtRecHead = NULL;
13956 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
13957 do
13958 {
13959 while (pEvtRec)
13960 {
13961 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
13962 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
13963 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
13964 pEvtRec = pNext;
13965 }
13966 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
13967 pVCpu->iem.s.pOtherEvtRecHead = NULL;
13968 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
13969 } while (pEvtRec);
13970 }
13971}
13972
13973
13974/**
13975 * Allocate an event record.
13976 * @returns Pointer to a record.
13977 */
13978IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
13979{
13980 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13981 return NULL;
13982
13983 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
13984 if (pEvtRec)
13985 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
13986 else
13987 {
13988 if (!pVCpu->iem.s.ppIemEvtRecNext)
13989 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
13990
13991 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
13992 if (!pEvtRec)
13993 return NULL;
13994 }
13995 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
13996 pEvtRec->pNext = NULL;
13997 return pEvtRec;
13998}
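/*
 * Records from this pool end up on one of two lists: events generated by IEM
 * itself are linked in via ppIemEvtRecNext, while the IEMNotify* hooks below
 * record what IOM saw when the instruction was re-executed elsewhere and link
 * those in via ppOtherEvtRecNext. The two lists are compared against each
 * other in iemExecVerificationModeCheck().
 */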
13999
14000
14001/**
14002 * IOMMMIORead notification.
14003 */
14004VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
14005{
14006 PVMCPU pVCpu = VMMGetCpu(pVM);
14007 if (!pVCpu)
14008 return;
14009 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14010 if (!pEvtRec)
14011 return;
14012 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
14013 pEvtRec->u.RamRead.GCPhys = GCPhys;
14014 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
14015 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14016 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14017}
14018
14019
14020/**
14021 * IOMMMIOWrite notification.
14022 */
14023VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
14024{
14025 PVMCPU pVCpu = VMMGetCpu(pVM);
14026 if (!pVCpu)
14027 return;
14028 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14029 if (!pEvtRec)
14030 return;
14031 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
14032 pEvtRec->u.RamWrite.GCPhys = GCPhys;
14033 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
14034 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
14035 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
14036 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
14037 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
14038 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14039 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14040}
14041
14042
14043/**
14044 * IOMIOPortRead notification.
14045 */
14046VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
14047{
14048 PVMCPU pVCpu = VMMGetCpu(pVM);
14049 if (!pVCpu)
14050 return;
14051 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14052 if (!pEvtRec)
14053 return;
14054 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
14055 pEvtRec->u.IOPortRead.Port = Port;
14056 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
14057 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14058 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14059}
14060
14061/**
14062 * IOMIOPortWrite notification.
14063 */
14064VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14065{
14066 PVMCPU pVCpu = VMMGetCpu(pVM);
14067 if (!pVCpu)
14068 return;
14069 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14070 if (!pEvtRec)
14071 return;
14072 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
14073 pEvtRec->u.IOPortWrite.Port = Port;
14074 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
14075 pEvtRec->u.IOPortWrite.u32Value = u32Value;
14076 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14077 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14078}
14079
14080
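/**
 * IOMIOPortReadString notification.
 */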
14081VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
14082{
14083 PVMCPU pVCpu = VMMGetCpu(pVM);
14084 if (!pVCpu)
14085 return;
14086 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14087 if (!pEvtRec)
14088 return;
14089 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
14090 pEvtRec->u.IOPortStrRead.Port = Port;
14091 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
14092 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
14093 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14094 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14095}
14096
14097
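/**
 * IOMIOPortWriteString notification.
 */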
14098VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
14099{
14100 PVMCPU pVCpu = VMMGetCpu(pVM);
14101 if (!pVCpu)
14102 return;
14103 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14104 if (!pEvtRec)
14105 return;
14106 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
14107 pEvtRec->u.IOPortStrWrite.Port = Port;
14108 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
14109 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
14110 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14111 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14112}
14113
14114
14115/**
14116 * Fakes and records an I/O port read.
14117 *
14118 * @returns VINF_SUCCESS.
14119 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14120 * @param Port The I/O port.
14121 * @param pu32Value Where to store the fake value.
14122 * @param cbValue The size of the access.
14123 */
14124IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14125{
14126 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14127 if (pEvtRec)
14128 {
14129 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
14130 pEvtRec->u.IOPortRead.Port = Port;
14131 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
14132 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
14133 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
14134 }
14135 pVCpu->iem.s.cIOReads++;
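 /* Use a recognizable filler value; iemVerifyWriteRecord() checks for the
 0xcc pattern so that memory written by a faked INS doesn't trigger a
 RAM-write mismatch. */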
14136 *pu32Value = 0xcccccccc;
14137 return VINF_SUCCESS;
14138}
14139
14140
14141/**
14142 * Fakes and records an I/O port write.
14143 *
14144 * @returns VINF_SUCCESS.
14145 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14146 * @param Port The I/O port.
14147 * @param u32Value The value being written.
14148 * @param cbValue The size of the access.
14149 */
14150IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14151{
14152 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14153 if (pEvtRec)
14154 {
14155 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
14156 pEvtRec->u.IOPortWrite.Port = Port;
14157 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
14158 pEvtRec->u.IOPortWrite.u32Value = u32Value;
14159 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
14160 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
14161 }
14162 pVCpu->iem.s.cIOWrites++;
14163 return VINF_SUCCESS;
14164}
14165
14166
14167/**
14168 * Used to add extra details about a stub case.
14169 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14170 */
14171IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
14172{
14173 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14174 PVM pVM = pVCpu->CTX_SUFF(pVM);
14176 char szRegs[4096];
14177 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
14178 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
14179 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
14180 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
14181 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
14182 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
14183 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
14184 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
14185 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
14186 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
14187 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
14188 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
14189 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
14190 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
14191 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
14192 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
14193 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
14194 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
14195 " efer=%016VR{efer}\n"
14196 " pat=%016VR{pat}\n"
14197 " sf_mask=%016VR{sf_mask}\n"
14198 "krnl_gs_base=%016VR{krnl_gs_base}\n"
14199 " lstar=%016VR{lstar}\n"
14200 " star=%016VR{star} cstar=%016VR{cstar}\n"
14201 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
14202 );
14203
14204 char szInstr1[256];
14205 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
14206 DBGF_DISAS_FLAGS_DEFAULT_MODE,
14207 szInstr1, sizeof(szInstr1), NULL);
14208 char szInstr2[256];
14209 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
14210 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14211 szInstr2, sizeof(szInstr2), NULL);
14212
14213 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
14214}
14215
14216
14217/**
14218 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
14219 * dump to the assertion info.
14220 *
14221 * @param pEvtRec The record to dump.
14222 */
14223IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
14224{
14225 switch (pEvtRec->enmEvent)
14226 {
14227 case IEMVERIFYEVENT_IOPORT_READ:
14228 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
 14229 pEvtRec->u.IOPortRead.Port,
 14230 pEvtRec->u.IOPortRead.cbValue);
14231 break;
14232 case IEMVERIFYEVENT_IOPORT_WRITE:
14233 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
14234 pEvtRec->u.IOPortWrite.Port,
14235 pEvtRec->u.IOPortWrite.cbValue,
14236 pEvtRec->u.IOPortWrite.u32Value);
14237 break;
14238 case IEMVERIFYEVENT_IOPORT_STR_READ:
14239 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
 14240 pEvtRec->u.IOPortStrRead.Port,
 14241 pEvtRec->u.IOPortStrRead.cbValue,
 14242 pEvtRec->u.IOPortStrRead.cTransfers);
14243 break;
14244 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14245 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
14246 pEvtRec->u.IOPortStrWrite.Port,
14247 pEvtRec->u.IOPortStrWrite.cbValue,
14248 pEvtRec->u.IOPortStrWrite.cTransfers);
14249 break;
14250 case IEMVERIFYEVENT_RAM_READ:
14251 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
14252 pEvtRec->u.RamRead.GCPhys,
14253 pEvtRec->u.RamRead.cb);
14254 break;
14255 case IEMVERIFYEVENT_RAM_WRITE:
14256 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
14257 pEvtRec->u.RamWrite.GCPhys,
14258 pEvtRec->u.RamWrite.cb,
14259 (int)pEvtRec->u.RamWrite.cb,
14260 pEvtRec->u.RamWrite.ab);
14261 break;
14262 default:
14263 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
14264 break;
14265 }
14266}
14267
14268
14269/**
 14270 * Raises an assertion on the specified records, showing the given message with
 14271 * record dumps attached.
14272 *
14273 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14274 * @param pEvtRec1 The first record.
14275 * @param pEvtRec2 The second record.
14276 * @param pszMsg The message explaining why we're asserting.
14277 */
14278IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
14279{
14280 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14281 iemVerifyAssertAddRecordDump(pEvtRec1);
14282 iemVerifyAssertAddRecordDump(pEvtRec2);
14283 iemVerifyAssertMsg2(pVCpu);
14284 RTAssertPanic();
14285}
14286
14287
14288/**
14289 * Raises an assertion on the specified record, showing the given message with
14290 * a record dump attached.
14291 *
14292 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 14293 * @param pEvtRec The record to dump.
14294 * @param pszMsg The message explaining why we're asserting.
14295 */
14296IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
14297{
14298 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14299 iemVerifyAssertAddRecordDump(pEvtRec);
14300 iemVerifyAssertMsg2(pVCpu);
14301 RTAssertPanic();
14302}
14303
14304
14305/**
14306 * Verifies a write record.
14307 *
14308 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14309 * @param pEvtRec The write record.
 14310 * @param fRem Set if REM was doing the other execution; if clear,
 14311 * it was HM.
14312 */
14313IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
14314{
14315 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
14316 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
14317 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
14318 if ( RT_FAILURE(rc)
14319 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
14320 {
14321 /* fend off ins */
14322 if ( !pVCpu->iem.s.cIOReads
14323 || pEvtRec->u.RamWrite.ab[0] != 0xcc
14324 || ( pEvtRec->u.RamWrite.cb != 1
14325 && pEvtRec->u.RamWrite.cb != 2
14326 && pEvtRec->u.RamWrite.cb != 4) )
14327 {
14328 /* fend off ROMs and MMIO */
14329 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
14330 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
14331 {
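 /* The two unsigned compares above skip the legacy VGA/ROM window just
 below 1 MB and the top 256 KB below 4 GB where the firmware flash is
 typically mapped, as writes to those areas may legitimately not stick. */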
14332 /* fend off fxsave */
14333 if (pEvtRec->u.RamWrite.cb != 512)
14334 {
14335 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
14336 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14337 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
14338 RTAssertMsg2Add("%s: %.*Rhxs\n"
14339 "iem: %.*Rhxs\n",
14340 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
14341 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
14342 iemVerifyAssertAddRecordDump(pEvtRec);
14343 iemVerifyAssertMsg2(pVCpu);
14344 RTAssertPanic();
14345 }
14346 }
14347 }
14348 }
14349
14350}
14351
14352/**
 14353 * Performs the post-execution verification checks.
14354 */
14355IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
14356{
14357 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14358 return rcStrictIem;
14359
14360 /*
14361 * Switch back the state.
14362 */
14363 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
14364 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
14365 Assert(pOrgCtx != pDebugCtx);
14366 IEM_GET_CTX(pVCpu) = pOrgCtx;
14367
14368 /*
14369 * Execute the instruction in REM.
14370 */
14371 bool fRem = false;
14372 PVM pVM = pVCpu->CTX_SUFF(pVM);
14374 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
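 /* Preferred order: single-step the instruction using HM first (when that
 verification flavour is compiled in and nothing ruled it out), then fall
 back on the recompiler for whatever HM could not or did not handle. */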
14375#ifdef IEM_VERIFICATION_MODE_FULL_HM
14376 if ( HMIsEnabled(pVM)
14377 && pVCpu->iem.s.cIOReads == 0
14378 && pVCpu->iem.s.cIOWrites == 0
14379 && !pVCpu->iem.s.fProblematicMemory)
14380 {
14381 uint64_t uStartRip = pOrgCtx->rip;
14382 unsigned iLoops = 0;
14383 do
14384 {
14385 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
14386 iLoops++;
14387 } while ( rc == VINF_SUCCESS
14388 || ( rc == VINF_EM_DBG_STEPPED
14389 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14390 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
14391 || ( pOrgCtx->rip != pDebugCtx->rip
14392 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
14393 && iLoops < 8) );
14394 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
14395 rc = VINF_SUCCESS;
14396 }
14397#endif
14398 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
14399 || rc == VINF_IOM_R3_IOPORT_READ
14400 || rc == VINF_IOM_R3_IOPORT_WRITE
14401 || rc == VINF_IOM_R3_MMIO_READ
14402 || rc == VINF_IOM_R3_MMIO_READ_WRITE
14403 || rc == VINF_IOM_R3_MMIO_WRITE
14404 || rc == VINF_CPUM_R3_MSR_READ
14405 || rc == VINF_CPUM_R3_MSR_WRITE
14406 || rc == VINF_EM_RESCHEDULE
14407 )
14408 {
14409 EMRemLock(pVM);
14410 rc = REMR3EmulateInstruction(pVM, pVCpu);
14411 AssertRC(rc);
14412 EMRemUnlock(pVM);
14413 fRem = true;
14414 }
14415
14416# if 1 /* Skip unimplemented instructions for now. */
14417 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14418 {
14419 IEM_GET_CTX(pVCpu) = pOrgCtx;
14420 if (rc == VINF_EM_DBG_STEPPED)
14421 return VINF_SUCCESS;
14422 return rc;
14423 }
14424# endif
14425
14426 /*
14427 * Compare the register states.
14428 */
14429 unsigned cDiffs = 0;
14430 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
14431 {
14432 //Log(("REM and IEM ends up with different registers!\n"));
14433 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
14434
14435# define CHECK_FIELD(a_Field) \
14436 do \
14437 { \
14438 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14439 { \
14440 switch (sizeof(pOrgCtx->a_Field)) \
14441 { \
14442 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14443 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14444 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14445 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14446 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14447 } \
14448 cDiffs++; \
14449 } \
14450 } while (0)
14451# define CHECK_XSTATE_FIELD(a_Field) \
14452 do \
14453 { \
14454 if (pOrgXState->a_Field != pDebugXState->a_Field) \
14455 { \
14456 switch (sizeof(pOrgXState->a_Field)) \
14457 { \
14458 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14459 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14460 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14461 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14462 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14463 } \
14464 cDiffs++; \
14465 } \
14466 } while (0)
14467
14468# define CHECK_BIT_FIELD(a_Field) \
14469 do \
14470 { \
14471 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14472 { \
14473 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
14474 cDiffs++; \
14475 } \
14476 } while (0)
14477
14478# define CHECK_SEL(a_Sel) \
14479 do \
14480 { \
14481 CHECK_FIELD(a_Sel.Sel); \
14482 CHECK_FIELD(a_Sel.Attr.u); \
14483 CHECK_FIELD(a_Sel.u64Base); \
14484 CHECK_FIELD(a_Sel.u32Limit); \
14485 CHECK_FIELD(a_Sel.fFlags); \
14486 } while (0)
14487
14488 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
14489 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
14490
14491#if 1 /* The recompiler doesn't update these the intel way. */
14492 if (fRem)
14493 {
14494 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
14495 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
14496 pOrgXState->x87.CS = pDebugXState->x87.CS;
14497 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
14498 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
14499 pOrgXState->x87.DS = pDebugXState->x87.DS;
14500 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
14501 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
14502 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
14503 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
14504 }
14505#endif
14506 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
14507 {
14508 RTAssertMsg2Weak(" the FPU state differs\n");
14509 cDiffs++;
14510 CHECK_XSTATE_FIELD(x87.FCW);
14511 CHECK_XSTATE_FIELD(x87.FSW);
14512 CHECK_XSTATE_FIELD(x87.FTW);
14513 CHECK_XSTATE_FIELD(x87.FOP);
14514 CHECK_XSTATE_FIELD(x87.FPUIP);
14515 CHECK_XSTATE_FIELD(x87.CS);
14516 CHECK_XSTATE_FIELD(x87.Rsrvd1);
14517 CHECK_XSTATE_FIELD(x87.FPUDP);
14518 CHECK_XSTATE_FIELD(x87.DS);
14519 CHECK_XSTATE_FIELD(x87.Rsrvd2);
14520 CHECK_XSTATE_FIELD(x87.MXCSR);
14521 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
14522 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
14523 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
14524 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
14525 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
14526 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
14527 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
14528 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
14529 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
14530 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
14531 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
14532 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
14533 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
14534 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
14535 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
14536 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
14537 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
14538 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
14539 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
14540 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
14541 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
14542 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
14543 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
14544 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
14545 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
14546 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
14547 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
14548 }
14549 CHECK_FIELD(rip);
14550 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
14551 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
14552 {
14553 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
14554 CHECK_BIT_FIELD(rflags.Bits.u1CF);
14555 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
14556 CHECK_BIT_FIELD(rflags.Bits.u1PF);
14557 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
14558 CHECK_BIT_FIELD(rflags.Bits.u1AF);
14559 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
14560 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
14561 CHECK_BIT_FIELD(rflags.Bits.u1SF);
14562 CHECK_BIT_FIELD(rflags.Bits.u1TF);
14563 CHECK_BIT_FIELD(rflags.Bits.u1IF);
14564 CHECK_BIT_FIELD(rflags.Bits.u1DF);
14565 CHECK_BIT_FIELD(rflags.Bits.u1OF);
14566 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
14567 CHECK_BIT_FIELD(rflags.Bits.u1NT);
14568 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
 14569 if (0 && !fRem) /** @todo debug the occasional clearing of the RF flag when running against VT-x. */
14570 CHECK_BIT_FIELD(rflags.Bits.u1RF);
14571 CHECK_BIT_FIELD(rflags.Bits.u1VM);
14572 CHECK_BIT_FIELD(rflags.Bits.u1AC);
14573 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
14574 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
14575 CHECK_BIT_FIELD(rflags.Bits.u1ID);
14576 }
14577
14578 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
14579 CHECK_FIELD(rax);
14580 CHECK_FIELD(rcx);
14581 if (!pVCpu->iem.s.fIgnoreRaxRdx)
14582 CHECK_FIELD(rdx);
14583 CHECK_FIELD(rbx);
14584 CHECK_FIELD(rsp);
14585 CHECK_FIELD(rbp);
14586 CHECK_FIELD(rsi);
14587 CHECK_FIELD(rdi);
14588 CHECK_FIELD(r8);
14589 CHECK_FIELD(r9);
14590 CHECK_FIELD(r10);
14591 CHECK_FIELD(r11);
14592 CHECK_FIELD(r12);
 14593 CHECK_FIELD(r13);
 CHECK_FIELD(r14);
 CHECK_FIELD(r15);
14594 CHECK_SEL(cs);
14595 CHECK_SEL(ss);
14596 CHECK_SEL(ds);
14597 CHECK_SEL(es);
14598 CHECK_SEL(fs);
14599 CHECK_SEL(gs);
14600 CHECK_FIELD(cr0);
14601
 14602 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
 14603 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
 14604 /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access
 14605 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
14606 if (pOrgCtx->cr2 != pDebugCtx->cr2)
14607 {
14608 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
14609 { /* ignore */ }
14610 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
14611 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
14612 && fRem)
14613 { /* ignore */ }
14614 else
14615 CHECK_FIELD(cr2);
14616 }
14617 CHECK_FIELD(cr3);
14618 CHECK_FIELD(cr4);
14619 CHECK_FIELD(dr[0]);
14620 CHECK_FIELD(dr[1]);
14621 CHECK_FIELD(dr[2]);
14622 CHECK_FIELD(dr[3]);
14623 CHECK_FIELD(dr[6]);
14624 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
14625 CHECK_FIELD(dr[7]);
14626 CHECK_FIELD(gdtr.cbGdt);
14627 CHECK_FIELD(gdtr.pGdt);
14628 CHECK_FIELD(idtr.cbIdt);
14629 CHECK_FIELD(idtr.pIdt);
14630 CHECK_SEL(ldtr);
14631 CHECK_SEL(tr);
14632 CHECK_FIELD(SysEnter.cs);
14633 CHECK_FIELD(SysEnter.eip);
14634 CHECK_FIELD(SysEnter.esp);
14635 CHECK_FIELD(msrEFER);
14636 CHECK_FIELD(msrSTAR);
14637 CHECK_FIELD(msrPAT);
14638 CHECK_FIELD(msrLSTAR);
14639 CHECK_FIELD(msrCSTAR);
14640 CHECK_FIELD(msrSFMASK);
14641 CHECK_FIELD(msrKERNELGSBASE);
14642
14643 if (cDiffs != 0)
14644 {
14645 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14646 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
14647 RTAssertPanic();
14648 static bool volatile s_fEnterDebugger = true;
14649 if (s_fEnterDebugger)
14650 DBGFSTOP(pVM);
14651
14652# if 1 /* Ignore unimplemented instructions for now. */
14653 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14654 rcStrictIem = VINF_SUCCESS;
14655# endif
14656 }
14657# undef CHECK_FIELD
14658# undef CHECK_BIT_FIELD
14659 }
14660
14661 /*
14662 * If the register state compared fine, check the verification event
14663 * records.
14664 */
14665 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
14666 {
14667 /*
 14668 * Compare verification event records.
14669 * - I/O port accesses should be a 1:1 match.
14670 */
14671 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
14672 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
14673 while (pIemRec && pOtherRec)
14674 {
 14675 /* Since we might miss RAM writes and reads, ignore reads and check
 14676 that any extra written memory matches what is actually in RAM. */
14677 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
14678 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
14679 && pIemRec->pNext)
14680 {
14681 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14682 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14683 pIemRec = pIemRec->pNext;
14684 }
14685
14686 /* Do the compare. */
14687 if (pIemRec->enmEvent != pOtherRec->enmEvent)
14688 {
14689 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
14690 break;
14691 }
14692 bool fEquals;
14693 switch (pIemRec->enmEvent)
14694 {
14695 case IEMVERIFYEVENT_IOPORT_READ:
14696 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
14697 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
14698 break;
14699 case IEMVERIFYEVENT_IOPORT_WRITE:
14700 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
14701 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
14702 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
14703 break;
14704 case IEMVERIFYEVENT_IOPORT_STR_READ:
14705 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
14706 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
14707 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
14708 break;
14709 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14710 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
14711 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
14712 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
14713 break;
14714 case IEMVERIFYEVENT_RAM_READ:
14715 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
14716 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
14717 break;
14718 case IEMVERIFYEVENT_RAM_WRITE:
14719 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
14720 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
14721 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
14722 break;
14723 default:
14724 fEquals = false;
14725 break;
14726 }
14727 if (!fEquals)
14728 {
14729 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
14730 break;
14731 }
14732
14733 /* advance */
14734 pIemRec = pIemRec->pNext;
14735 pOtherRec = pOtherRec->pNext;
14736 }
14737
14738 /* Ignore extra writes and reads. */
14739 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
14740 {
14741 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14742 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14743 pIemRec = pIemRec->pNext;
14744 }
14745 if (pIemRec != NULL)
14746 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
14747 else if (pOtherRec != NULL)
14748 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
14749 }
14750 IEM_GET_CTX(pVCpu) = pOrgCtx;
14751
14752 return rcStrictIem;
14753}
14754
14755#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14756
14757/* stubs */
14758IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14759{
14760 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
14761 return VERR_INTERNAL_ERROR;
14762}
14763
14764IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14765{
14766 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
14767 return VERR_INTERNAL_ERROR;
14768}
14769
14770#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14771
14772
14773#ifdef LOG_ENABLED
14774/**
14775 * Logs the current instruction.
14776 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14777 * @param pCtx The current CPU context.
14778 * @param fSameCtx Set if we have the same context information as the VMM,
14779 * clear if we may have already executed an instruction in
14780 * our debug context. When clear, we assume IEMCPU holds
14781 * valid CPU mode info.
14782 */
14783IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
14784{
14785# ifdef IN_RING3
14786 if (LogIs2Enabled())
14787 {
14788 char szInstr[256];
14789 uint32_t cbInstr = 0;
14790 if (fSameCtx)
14791 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
14792 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14793 szInstr, sizeof(szInstr), &cbInstr);
14794 else
14795 {
14796 uint32_t fFlags = 0;
14797 switch (pVCpu->iem.s.enmCpuMode)
14798 {
14799 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
14800 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
14801 case IEMMODE_16BIT:
14802 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
14803 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
14804 else
14805 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
14806 break;
14807 }
14808 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
14809 szInstr, sizeof(szInstr), &cbInstr);
14810 }
14811
14812 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
14813 Log2(("****\n"
14814 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
14815 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
14816 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
14817 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
14818 " %s\n"
14819 ,
14820 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
14821 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
14822 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
14823 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
14824 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
14825 szInstr));
14826
14827 if (LogIs3Enabled())
14828 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14829 }
14830 else
14831# endif
14832 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
14833 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
14834 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
14835}
14836#endif
14837
14838
14839/**
 14840 * Makes status code adjustments (pass up from I/O and access handlers)
14841 * as well as maintaining statistics.
14842 *
14843 * @returns Strict VBox status code to pass up.
14844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14845 * @param rcStrict The status from executing an instruction.
14846 */
14847DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14848{
14849 if (rcStrict != VINF_SUCCESS)
14850 {
14851 if (RT_SUCCESS(rcStrict))
14852 {
14853 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
14854 || rcStrict == VINF_IOM_R3_IOPORT_READ
14855 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
14856 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
14857 || rcStrict == VINF_IOM_R3_MMIO_READ
14858 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
14859 || rcStrict == VINF_IOM_R3_MMIO_WRITE
14860 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
14861 || rcStrict == VINF_CPUM_R3_MSR_READ
14862 || rcStrict == VINF_CPUM_R3_MSR_WRITE
14863 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
14864 || rcStrict == VINF_EM_RAW_TO_R3
14865 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
14866 || rcStrict == VINF_EM_TRIPLE_FAULT
14867 /* raw-mode / virt handlers only: */
14868 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
14869 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
14870 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
14871 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
14872 || rcStrict == VINF_SELM_SYNC_GDT
14873 || rcStrict == VINF_CSAM_PENDING_ACTION
14874 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
14875 /* nested hw.virt codes: */
14876 || rcStrict == VINF_SVM_VMEXIT
14877 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
14878/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
14879 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
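 /* Merge in any pending pass-up status: it wins if it falls outside the
 VINF_EM_FIRST..VINF_EM_LAST window or ranks as more important (lower
 value) than the status we are about to return; otherwise it is only
 counted. */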
14880#ifdef VBOX_WITH_NESTED_HWVIRT
14881 if ( rcStrict == VINF_SVM_VMEXIT
14882 && rcPassUp == VINF_SUCCESS)
14883 rcStrict = VINF_SUCCESS;
14884 else
14885#endif
14886 if (rcPassUp == VINF_SUCCESS)
14887 pVCpu->iem.s.cRetInfStatuses++;
14888 else if ( rcPassUp < VINF_EM_FIRST
14889 || rcPassUp > VINF_EM_LAST
14890 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
14891 {
14892 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14893 pVCpu->iem.s.cRetPassUpStatus++;
14894 rcStrict = rcPassUp;
14895 }
14896 else
14897 {
14898 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14899 pVCpu->iem.s.cRetInfStatuses++;
14900 }
14901 }
14902 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
14903 pVCpu->iem.s.cRetAspectNotImplemented++;
14904 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14905 pVCpu->iem.s.cRetInstrNotImplemented++;
14906#ifdef IEM_VERIFICATION_MODE_FULL
14907 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
14908 rcStrict = VINF_SUCCESS;
14909#endif
14910 else
14911 pVCpu->iem.s.cRetErrStatuses++;
14912 }
14913 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
14914 {
14915 pVCpu->iem.s.cRetPassUpStatus++;
14916 rcStrict = pVCpu->iem.s.rcPassUp;
14917 }
14918
14919 return rcStrict;
14920}
14921
14922
14923/**
14924 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
14925 * IEMExecOneWithPrefetchedByPC.
14926 *
14927 * Similar code is found in IEMExecLots.
14928 *
14929 * @return Strict VBox status code.
 14930 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14932 * @param fExecuteInhibit If set, execute the instruction following CLI,
14933 * POP SS and MOV SS,GR.
14934 */
14935DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
14936{
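 /*
 * Two dispatch flavours: with IEM_WITH_SETJMP the opcode fetchers longjmp
 * back here on failure (hence the setjmp and the cLongJumps statistic),
 * otherwise every fetch returns a strict status code which is propagated
 * the conventional way.
 */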
14937#ifdef IEM_WITH_SETJMP
14938 VBOXSTRICTRC rcStrict;
14939 jmp_buf JmpBuf;
14940 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14941 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14942 if ((rcStrict = setjmp(JmpBuf)) == 0)
14943 {
14944 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14945 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14946 }
14947 else
14948 pVCpu->iem.s.cLongJumps++;
14949 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14950#else
14951 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14952 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14953#endif
14954 if (rcStrict == VINF_SUCCESS)
14955 pVCpu->iem.s.cInstructions++;
14956 if (pVCpu->iem.s.cActiveMappings > 0)
14957 {
14958 Assert(rcStrict != VINF_SUCCESS);
14959 iemMemRollback(pVCpu);
14960 }
14961//#ifdef DEBUG
14962// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14963//#endif
14964
14965 /* Execute the next instruction as well if a cli, pop ss or
14966 mov ss, Gr has just completed successfully. */
14967 if ( fExecuteInhibit
14968 && rcStrict == VINF_SUCCESS
14969 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14970 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
14971 {
14972 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14973 if (rcStrict == VINF_SUCCESS)
14974 {
14975#ifdef LOG_ENABLED
14976 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
14977#endif
14978#ifdef IEM_WITH_SETJMP
14979 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14980 if ((rcStrict = setjmp(JmpBuf)) == 0)
14981 {
14982 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14983 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14984 }
14985 else
14986 pVCpu->iem.s.cLongJumps++;
14987 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14988#else
14989 IEM_OPCODE_GET_NEXT_U8(&b);
14990 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14991#endif
14992 if (rcStrict == VINF_SUCCESS)
14993 pVCpu->iem.s.cInstructions++;
14994 if (pVCpu->iem.s.cActiveMappings > 0)
14995 {
14996 Assert(rcStrict != VINF_SUCCESS);
14997 iemMemRollback(pVCpu);
14998 }
14999 }
15000 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
15001 }
15002
15003 /*
15004 * Return value fiddling, statistics and sanity assertions.
15005 */
15006 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15007
15008 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
15009 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
15010#if defined(IEM_VERIFICATION_MODE_FULL)
15011 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
15012 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
15013 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
15014 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
15015#endif
15016 return rcStrict;
15017}
15018
15019
15020#ifdef IN_RC
15021/**
15022 * Re-enters raw-mode or ensures we return to ring-3.
15023 *
15024 * @returns rcStrict, maybe modified.
15025 * @param pVCpu The cross context virtual CPU structure of the calling thread.
15026 * @param pCtx The current CPU context.
15027 * @param rcStrict The status code returned by the interpreter.
15028 */
15029DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
15030{
15031 if ( !pVCpu->iem.s.fInPatchCode
15032 && ( rcStrict == VINF_SUCCESS
15033 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
15034 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
15035 {
15036 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
15037 CPUMRawEnter(pVCpu);
15038 else
15039 {
15040 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
15041 rcStrict = VINF_EM_RESCHEDULE;
15042 }
15043 }
15044 return rcStrict;
15045}
15046#endif
15047
15048
15049/**
15050 * Execute one instruction.
15051 *
15052 * @return Strict VBox status code.
15053 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15054 */
15055VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
15056{
15057#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15058 if (++pVCpu->iem.s.cVerifyDepth == 1)
15059 iemExecVerificationModeSetup(pVCpu);
15060#endif
15061#ifdef LOG_ENABLED
15062 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15063 iemLogCurInstr(pVCpu, pCtx, true);
15064#endif
15065
15066 /*
15067 * Do the decoding and emulation.
15068 */
15069 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15070 if (rcStrict == VINF_SUCCESS)
15071 rcStrict = iemExecOneInner(pVCpu, true);
15072
15073#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15074 /*
15075 * Assert some sanity.
15076 */
15077 if (pVCpu->iem.s.cVerifyDepth == 1)
15078 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
15079 pVCpu->iem.s.cVerifyDepth--;
15080#endif
15081#ifdef IN_RC
15082 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15083#endif
15084 if (rcStrict != VINF_SUCCESS)
15085 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15086 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15087 return rcStrict;
15088}
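/*
 * Usage sketch (illustrative only, not prescribed by this file): a ring-3 caller
 * running on the EMT could single-step the guest with IEMExecOne like this.  The
 * loop bound and the decision to simply hand any non-VINF_SUCCESS status back to
 * the caller (EM/IOM deal with I/O, MMIO and scheduling codes) are assumptions
 * made for the example.
 *
 *      for (uint32_t i = 0; i < 32; i++)
 *      {
 *          VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *          if (rcStrict != VINF_SUCCESS)
 *              return rcStrict;
 *      }
 */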
15089
15090
15091VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
15092{
15093 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15094 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15095
15096 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15097 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15098 if (rcStrict == VINF_SUCCESS)
15099 {
15100 rcStrict = iemExecOneInner(pVCpu, true);
15101 if (pcbWritten)
15102 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15103 }
15104
15105#ifdef IN_RC
15106 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15107#endif
15108 return rcStrict;
15109}
15110
15111
15112VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15113 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
15114{
15115 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15116 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15117
15118 VBOXSTRICTRC rcStrict;
15119 if ( cbOpcodeBytes
15120 && pCtx->rip == OpcodeBytesPC)
15121 {
15122 iemInitDecoder(pVCpu, false);
15123#ifdef IEM_WITH_CODE_TLB
15124 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15125 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15126 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15127 pVCpu->iem.s.offCurInstrStart = 0;
15128 pVCpu->iem.s.offInstrNextByte = 0;
15129#else
15130 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15131 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15132#endif
15133 rcStrict = VINF_SUCCESS;
15134 }
15135 else
15136 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15137 if (rcStrict == VINF_SUCCESS)
15138 {
15139 rcStrict = iemExecOneInner(pVCpu, true);
15140 }
15141
15142#ifdef IN_RC
15143 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15144#endif
15145 return rcStrict;
15146}
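/*
 * Usage sketch (illustrative only): a caller that already has the opcode bytes,
 * e.g. from an instruction buffer supplied by a VM-exit, can skip the prefetch by
 * handing them in together with the RIP they were read from.  The abInstr buffer
 * and cbInstrBytes are assumptions for the example; when pCtx->rip no longer
 * matches OpcodeBytesPC the function silently falls back to a normal prefetch.
 *
 *      PCPUMCTX     pCtx     = IEM_GET_CTX(pVCpu);
 *      VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pCtx->rip,
 *                                                           &abInstr[0], cbInstrBytes);
 */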
15147
15148
15149VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
15150{
15151 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15152 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15153
15154 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15155 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15156 if (rcStrict == VINF_SUCCESS)
15157 {
15158 rcStrict = iemExecOneInner(pVCpu, false);
15159 if (pcbWritten)
15160 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15161 }
15162
15163#ifdef IN_RC
15164 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15165#endif
15166 return rcStrict;
15167}
15168
15169
15170VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15171 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
15172{
15173 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15174 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15175
15176 VBOXSTRICTRC rcStrict;
15177 if ( cbOpcodeBytes
15178 && pCtx->rip == OpcodeBytesPC)
15179 {
15180 iemInitDecoder(pVCpu, true);
15181#ifdef IEM_WITH_CODE_TLB
15182 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15183 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15184 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15185 pVCpu->iem.s.offCurInstrStart = 0;
15186 pVCpu->iem.s.offInstrNextByte = 0;
15187#else
15188 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15189 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15190#endif
15191 rcStrict = VINF_SUCCESS;
15192 }
15193 else
15194 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15195 if (rcStrict == VINF_SUCCESS)
15196 rcStrict = iemExecOneInner(pVCpu, false);
15197
15198#ifdef IN_RC
15199 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15200#endif
15201 return rcStrict;
15202}
15203
15204
15205/**
15206 * For debugging DISGetParamSize, may come in handy.
15207 *
15208 * @returns Strict VBox status code.
15209 * @param pVCpu The cross context virtual CPU structure of the
15210 * calling EMT.
15211 * @param pCtxCore The context core structure.
15212 * @param OpcodeBytesPC The PC of the opcode bytes.
15213 * @param pvOpcodeBytes Prefetched opcode bytes.
15214 * @param cbOpcodeBytes Number of prefetched bytes.
15215 * @param pcbWritten Where to return the number of bytes written.
15216 * Optional.
15217 */
15218VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15219 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
15220 uint32_t *pcbWritten)
15221{
15222 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15223 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15224
15225 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15226 VBOXSTRICTRC rcStrict;
15227 if ( cbOpcodeBytes
15228 && pCtx->rip == OpcodeBytesPC)
15229 {
15230 iemInitDecoder(pVCpu, true);
15231#ifdef IEM_WITH_CODE_TLB
15232 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15233 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15234 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15235 pVCpu->iem.s.offCurInstrStart = 0;
15236 pVCpu->iem.s.offInstrNextByte = 0;
15237#else
15238 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15239 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15240#endif
15241 rcStrict = VINF_SUCCESS;
15242 }
15243 else
15244 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15245 if (rcStrict == VINF_SUCCESS)
15246 {
15247 rcStrict = iemExecOneInner(pVCpu, false);
15248 if (pcbWritten)
15249 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15250 }
15251
15252#ifdef IN_RC
15253 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15254#endif
15255 return rcStrict;
15256}
15257
15258
15259VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
15260{
15261 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
15262
15263#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15264 /*
15265 * See if there is an interrupt pending in TRPM, inject it if we can.
15266 */
15267 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15268# ifdef IEM_VERIFICATION_MODE_FULL
15269 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15270# endif
15271
15272 /** @todo Maybe someday we can centralize this under CPUMCanInjectInterrupt()? */
15273#if defined(VBOX_WITH_NESTED_HWVIRT)
15274 bool fIntrEnabled = pCtx->hwvirt.svm.fGif;
15275 if (fIntrEnabled)
15276 {
15277 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
15278 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);
15279 else
15280 fIntrEnabled = pCtx->eflags.Bits.u1IF;
15281 }
15282#else
15283 bool fIntrEnabled = pCtx->eflags.Bits.u1IF;
15284#endif
15285 if ( fIntrEnabled
15286 && TRPMHasTrap(pVCpu)
15287 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15288 {
15289 uint8_t u8TrapNo;
15290 TRPMEVENT enmType;
15291 RTGCUINT uErrCode;
15292 RTGCPTR uCr2;
15293 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15294 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15295 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15296 TRPMResetTrap(pVCpu);
15297 }
15298
15299 /*
15300 * Log the state.
15301 */
15302# ifdef LOG_ENABLED
15303 iemLogCurInstr(pVCpu, pCtx, true);
15304# endif
15305
15306 /*
15307 * Do the decoding and emulation.
15308 */
15309 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15310 if (rcStrict == VINF_SUCCESS)
15311 rcStrict = iemExecOneInner(pVCpu, true);
15312
15313 /*
15314 * Assert some sanity.
15315 */
15316 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
15317
15318 /*
15319 * Log and return.
15320 */
15321 if (rcStrict != VINF_SUCCESS)
15322 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15323 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15324 if (pcInstructions)
15325 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15326 return rcStrict;
15327
15328#else /* Not verification mode */
15329
15330 /*
15331 * See if there is an interrupt pending in TRPM, inject it if we can.
15332 */
15333 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15334# ifdef IEM_VERIFICATION_MODE_FULL
15335 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15336# endif
15337
15338 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
15339#if defined(VBOX_WITH_NESTED_HWVIRT)
15340 bool fIntrEnabled = pCtx->hwvirt.svm.fGif;
15341 if (fIntrEnabled)
15342 {
15343 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
15344 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);
15345 else
15346 fIntrEnabled = pCtx->eflags.Bits.u1IF;
15347 }
15348#else
15349 bool fIntrEnabled = pCtx->eflags.Bits.u1IF;
15350#endif
15351 if ( fIntrEnabled
15352 && TRPMHasTrap(pVCpu)
15353 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15354 {
15355 uint8_t u8TrapNo;
15356 TRPMEVENT enmType;
15357 RTGCUINT uErrCode;
15358 RTGCPTR uCr2;
15359 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15360 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15361 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15362 TRPMResetTrap(pVCpu);
15363 }
15364
15365 /*
15366 * Initial decoder init w/ prefetch, then setup setjmp.
15367 */
15368 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15369 if (rcStrict == VINF_SUCCESS)
15370 {
15371# ifdef IEM_WITH_SETJMP
15372 jmp_buf JmpBuf;
15373 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
15374 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
15375 pVCpu->iem.s.cActiveMappings = 0;
15376 if ((rcStrict = setjmp(JmpBuf)) == 0)
15377# endif
15378 {
15379 /*
15380 * The run loop. We limit ourselves to 4096 instructions right now.
15381 */
15382 PVM pVM = pVCpu->CTX_SUFF(pVM);
15383 uint32_t cInstr = 4096;
15384 for (;;)
15385 {
15386 /*
15387 * Log the state.
15388 */
15389# ifdef LOG_ENABLED
15390 iemLogCurInstr(pVCpu, pCtx, true);
15391# endif
15392
15393 /*
15394 * Do the decoding and emulation.
15395 */
15396 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
15397 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
15398 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15399 {
15400 Assert(pVCpu->iem.s.cActiveMappings == 0);
15401 pVCpu->iem.s.cInstructions++;
15402 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
15403 {
15404 uint32_t fCpu = pVCpu->fLocalForcedActions
15405 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
15406 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
15407 | VMCPU_FF_TLB_FLUSH
15408# ifdef VBOX_WITH_RAW_MODE
15409 | VMCPU_FF_TRPM_SYNC_IDT
15410 | VMCPU_FF_SELM_SYNC_TSS
15411 | VMCPU_FF_SELM_SYNC_GDT
15412 | VMCPU_FF_SELM_SYNC_LDT
15413# endif
15414 | VMCPU_FF_INHIBIT_INTERRUPTS
15415 | VMCPU_FF_BLOCK_NMIS
15416 | VMCPU_FF_UNHALT ));
15417
15418 if (RT_LIKELY( ( !fCpu
15419 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
15420 && !pCtx->rflags.Bits.u1IF) )
15421 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
15422 {
15423 if (cInstr-- > 0)
15424 {
15425 Assert(pVCpu->iem.s.cActiveMappings == 0);
15426 iemReInitDecoder(pVCpu);
15427 continue;
15428 }
15429 }
15430 }
15431 Assert(pVCpu->iem.s.cActiveMappings == 0);
15432 }
15433 else if (pVCpu->iem.s.cActiveMappings > 0)
15434 iemMemRollback(pVCpu);
15435 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15436 break;
15437 }
15438 }
15439# ifdef IEM_WITH_SETJMP
15440 else
15441 {
15442 if (pVCpu->iem.s.cActiveMappings > 0)
15443 iemMemRollback(pVCpu);
15444 pVCpu->iem.s.cLongJumps++;
15445# ifdef VBOX_WITH_NESTED_HWVIRT
15446 /*
15447 * When a nested-guest causes an exception intercept when fetching memory
15448 * (e.g. IEM_MC_FETCH_MEM_U16) as part of instruction execution, we need this
15449 * to fix-up VINF_SVM_VMEXIT on the longjmp way out, otherwise we will guru.
15450 */
15451 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15452# endif
15453 }
15454 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
15455# endif
15456
15457 /*
15458 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
15459 */
15460 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
15461 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
15462# if defined(IEM_VERIFICATION_MODE_FULL)
15463 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
15464 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
15465 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
15466 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
15467# endif
15468 }
15469# ifdef VBOX_WITH_NESTED_HWVIRT
15470 else
15471 {
15472 /*
15473 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
15474 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
15475 */
15476 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15477 }
15478# endif
15479
15480 /*
15481 * Maybe re-enter raw-mode and log.
15482 */
15483# ifdef IN_RC
15484 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15485# endif
15486 if (rcStrict != VINF_SUCCESS)
15487 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15488 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15489 if (pcInstructions)
15490 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15491 return rcStrict;
15492#endif /* Not verification mode */
15493}
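/*
 * Usage sketch (illustrative only): batch execution with instruction accounting.
 * What the caller does with the count and with the returned status is an
 * assumption; the status has already been through iemExecStatusCodeFiddling.
 *
 *      uint32_t     cInstructions = 0;
 *      VBOXSTRICTRC rcStrict      = IEMExecLots(pVCpu, &cInstructions);
 */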
15494
15495
15496
15497/**
15498 * Injects a trap, fault, abort, software interrupt or external interrupt.
15499 *
15500 * The parameter list matches TRPMQueryTrapAll pretty closely.
15501 *
15502 * @returns Strict VBox status code.
15503 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15504 * @param u8TrapNo The trap number.
15505 * @param enmType What type is it (trap/fault/abort), software
15506 * interrupt or hardware interrupt.
15507 * @param uErrCode The error code if applicable.
15508 * @param uCr2 The CR2 value if applicable.
15509 * @param cbInstr The instruction length (only relevant for
15510 * software interrupts).
15511 */
15512VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
15513 uint8_t cbInstr)
15514{
15515 iemInitDecoder(pVCpu, false);
15516#ifdef DBGFTRACE_ENABLED
15517 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
15518 u8TrapNo, enmType, uErrCode, uCr2);
15519#endif
15520
15521 uint32_t fFlags;
15522 switch (enmType)
15523 {
15524 case TRPM_HARDWARE_INT:
15525 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
15526 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
15527 uErrCode = uCr2 = 0;
15528 break;
15529
15530 case TRPM_SOFTWARE_INT:
15531 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
15532 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
15533 uErrCode = uCr2 = 0;
15534 break;
15535
15536 case TRPM_TRAP:
15537 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
15538 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
15539 if (u8TrapNo == X86_XCPT_PF)
15540 fFlags |= IEM_XCPT_FLAGS_CR2;
15541 switch (u8TrapNo)
15542 {
15543 case X86_XCPT_DF:
15544 case X86_XCPT_TS:
15545 case X86_XCPT_NP:
15546 case X86_XCPT_SS:
15547 case X86_XCPT_PF:
15548 case X86_XCPT_AC:
15549 fFlags |= IEM_XCPT_FLAGS_ERR;
15550 break;
15551
15552 case X86_XCPT_NMI:
15553 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
15554 break;
15555 }
15556 break;
15557
15558 IEM_NOT_REACHED_DEFAULT_CASE_RET();
15559 }
15560
15561 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
15562}
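/*
 * Usage sketch (illustrative only): forwarding a pending TRPM event into IEM,
 * mirroring the pattern used inside IEMExecLots above.  Error checking and the
 * verification-mode special case are trimmed for brevity.
 *
 *      if (TRPMHasTrap(pVCpu))
 *      {
 *          uint8_t   u8TrapNo;
 *          TRPMEVENT enmType;
 *          RTGCUINT  uErrCode;
 *          RTGCPTR   uCr2;
 *          TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL);
 *          VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0);
 *          TRPMResetTrap(pVCpu);
 *      }
 */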
15563
15564
15565/**
15566 * Injects the active TRPM event.
15567 *
15568 * @returns Strict VBox status code.
15569 * @param pVCpu The cross context virtual CPU structure.
15570 */
15571VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
15572{
15573#ifndef IEM_IMPLEMENTS_TASKSWITCH
15574 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
15575#else
15576 uint8_t u8TrapNo;
15577 TRPMEVENT enmType;
15578 RTGCUINT uErrCode;
15579 RTGCUINTPTR uCr2;
15580 uint8_t cbInstr;
15581 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
15582 if (RT_FAILURE(rc))
15583 return rc;
15584
15585 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
15586
15587 /** @todo Are there any other codes that imply the event was successfully
15588 * delivered to the guest? See @bugref{6607}. */
15589 if ( rcStrict == VINF_SUCCESS
15590 || rcStrict == VINF_IEM_RAISED_XCPT)
15591 {
15592 TRPMResetTrap(pVCpu);
15593 }
15594 return rcStrict;
15595#endif
15596}
15597
15598
15599VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
15600{
15601 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15602 return VERR_NOT_IMPLEMENTED;
15603}
15604
15605
15606VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
15607{
15608 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15609 return VERR_NOT_IMPLEMENTED;
15610}
15611
15612
15613#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
15614/**
15615 * Executes a IRET instruction with default operand size.
15616 *
15617 * This is for PATM.
15618 *
15619 * @returns VBox status code.
15620 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15621 * @param pCtxCore The register frame.
15622 */
15623VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
15624{
15625 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15626
15627 iemCtxCoreToCtx(pCtx, pCtxCore);
15628 iemInitDecoder(pVCpu);
15629 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
15630 if (rcStrict == VINF_SUCCESS)
15631 iemCtxToCtxCore(pCtxCore, pCtx);
15632 else
15633 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15634 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15635 return rcStrict;
15636}
15637#endif
15638
15639
15640/**
15641 * Macro used by the IEMExec* methods to check the given instruction length.
15642 *
15643 * Will return on failure!
15644 *
15645 * @param a_cbInstr The given instruction length.
15646 * @param a_cbMin The minimum length.
15647 */
15648#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
15649 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
15650 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
15651
15652
15653/**
15654 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
15655 *
15656 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
15657 *
15658 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
15659 * @param pVCpu The cross context virtual CPU structure of the calling thread.
15660 * @param rcStrict The status code to fiddle.
15661 */
15662DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15663{
15664 iemUninitExec(pVCpu);
15665#ifdef IN_RC
15666 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
15667 iemExecStatusCodeFiddling(pVCpu, rcStrict));
15668#else
15669 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15670#endif
15671}
15672
15673
15674/**
15675 * Interface for HM and EM for executing string I/O OUT (write) instructions.
15676 *
15677 * This API ASSUMES that the caller has already verified that the guest code is
15678 * allowed to access the I/O port. (The I/O port is in the DX register in the
15679 * guest state.)
15680 *
15681 * @returns Strict VBox status code.
15682 * @param pVCpu The cross context virtual CPU structure.
15683 * @param cbValue The size of the I/O port access (1, 2, or 4).
15684 * @param enmAddrMode The addressing mode.
15685 * @param fRepPrefix Indicates whether a repeat prefix is used
15686 * (doesn't matter which for this instruction).
15687 * @param cbInstr The instruction length in bytes.
15688 * @param iEffSeg The effective segment address.
15689 * @param fIoChecked Whether the access to the I/O port has been
15690 * checked or not. It's typically checked in the
15691 * HM scenario.
15692 */
15693VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15694 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
15695{
15696 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
15697 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15698
15699 /*
15700 * State init.
15701 */
15702 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15703
15704 /*
15705 * Switch orgy for getting to the right handler.
15706 */
15707 VBOXSTRICTRC rcStrict;
15708 if (fRepPrefix)
15709 {
15710 switch (enmAddrMode)
15711 {
15712 case IEMMODE_16BIT:
15713 switch (cbValue)
15714 {
15715 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15716 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15717 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15718 default:
15719 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15720 }
15721 break;
15722
15723 case IEMMODE_32BIT:
15724 switch (cbValue)
15725 {
15726 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15727 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15728 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15729 default:
15730 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15731 }
15732 break;
15733
15734 case IEMMODE_64BIT:
15735 switch (cbValue)
15736 {
15737 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15738 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15739 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15740 default:
15741 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15742 }
15743 break;
15744
15745 default:
15746 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15747 }
15748 }
15749 else
15750 {
15751 switch (enmAddrMode)
15752 {
15753 case IEMMODE_16BIT:
15754 switch (cbValue)
15755 {
15756 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15757 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15758 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15759 default:
15760 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15761 }
15762 break;
15763
15764 case IEMMODE_32BIT:
15765 switch (cbValue)
15766 {
15767 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15768 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15769 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15770 default:
15771 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15772 }
15773 break;
15774
15775 case IEMMODE_64BIT:
15776 switch (cbValue)
15777 {
15778 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15779 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15780 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15781 default:
15782 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15783 }
15784 break;
15785
15786 default:
15787 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15788 }
15789 }
15790
15791 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15792}
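/*
 * Usage sketch (illustrative only): an HM exit handler that has established it is
 * dealing with a REP OUTSB through DS with 64-bit addressing might forward it as
 * shown.  cbInstr comes from the exit information and is an assumption here, as is
 * the claim that the I/O port access has already been checked.
 *
 *      Arguments: cbValue=1 (byte), IEMMODE_64BIT addressing, fRepPrefix=true,
 *      iEffSeg=X86_SREG_DS, fIoChecked=true.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1, IEMMODE_64BIT, true,
 *                                                   cbInstr, X86_SREG_DS, true);
 */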
15793
15794
15795/**
15796 * Interface for HM and EM for executing string I/O IN (read) instructions.
15797 *
15798 * This API ASSUMES that the caller has already verified that the guest code is
15799 * allowed to access the I/O port. (The I/O port is in the DX register in the
15800 * guest state.)
15801 *
15802 * @returns Strict VBox status code.
15803 * @param pVCpu The cross context virtual CPU structure.
15804 * @param cbValue The size of the I/O port access (1, 2, or 4).
15805 * @param enmAddrMode The addressing mode.
15806 * @param fRepPrefix Indicates whether a repeat prefix is used
15807 * (doesn't matter which for this instruction).
15808 * @param cbInstr The instruction length in bytes.
15809 * @param fIoChecked Whether the access to the I/O port has been
15810 * checked or not. It's typically checked in the
15811 * HM scenario.
15812 */
15813VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15814 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15815{
15816 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15817
15818 /*
15819 * State init.
15820 */
15821 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15822
15823 /*
15824 * Switch orgy for getting to the right handler.
15825 */
15826 VBOXSTRICTRC rcStrict;
15827 if (fRepPrefix)
15828 {
15829 switch (enmAddrMode)
15830 {
15831 case IEMMODE_16BIT:
15832 switch (cbValue)
15833 {
15834 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15835 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15836 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15837 default:
15838 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15839 }
15840 break;
15841
15842 case IEMMODE_32BIT:
15843 switch (cbValue)
15844 {
15845 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15846 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15847 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15848 default:
15849 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15850 }
15851 break;
15852
15853 case IEMMODE_64BIT:
15854 switch (cbValue)
15855 {
15856 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15857 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15858 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15859 default:
15860 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15861 }
15862 break;
15863
15864 default:
15865 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15866 }
15867 }
15868 else
15869 {
15870 switch (enmAddrMode)
15871 {
15872 case IEMMODE_16BIT:
15873 switch (cbValue)
15874 {
15875 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15876 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15877 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15878 default:
15879 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15880 }
15881 break;
15882
15883 case IEMMODE_32BIT:
15884 switch (cbValue)
15885 {
15886 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15887 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15888 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15889 default:
15890 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15891 }
15892 break;
15893
15894 case IEMMODE_64BIT:
15895 switch (cbValue)
15896 {
15897 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15898 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15899 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15900 default:
15901 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15902 }
15903 break;
15904
15905 default:
15906 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15907 }
15908 }
15909
15910 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15911}
15912
15913
15914/**
15915 * Interface for rawmode to execute an OUT instruction.
15916 *
15917 * @returns Strict VBox status code.
15918 * @param pVCpu The cross context virtual CPU structure.
15919 * @param cbInstr The instruction length in bytes.
15920 * @param u16Port The port to write to.
15921 * @param cbReg The register size.
15922 *
15923 * @remarks In ring-0 not all of the state needs to be synced in.
15924 */
15925VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15926{
15927 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15928 Assert(cbReg <= 4 && cbReg != 3);
15929
15930 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15931 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
15932 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15933}
15934
15935
15936/**
15937 * Interface for rawmode to execute an IN instruction.
15938 *
15939 * @returns Strict VBox status code.
15940 * @param pVCpu The cross context virtual CPU structure.
15941 * @param cbInstr The instruction length in bytes.
15942 * @param u16Port The port to read.
15943 * @param cbReg The register size.
15944 */
15945VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15946{
15947 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15948 Assert(cbReg <= 4 && cbReg != 3);
15949
15950 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15951 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
15952 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15953}
15954
15955
15956/**
15957 * Interface for HM and EM to write to a CRx register.
15958 *
15959 * @returns Strict VBox status code.
15960 * @param pVCpu The cross context virtual CPU structure.
15961 * @param cbInstr The instruction length in bytes.
15962 * @param iCrReg The control register number (destination).
15963 * @param iGReg The general purpose register number (source).
15964 *
15965 * @remarks In ring-0 not all of the state needs to be synced in.
15966 */
15967VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15968{
15969 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15970 Assert(iCrReg < 16);
15971 Assert(iGReg < 16);
15972
15973 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15974 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15975 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15976}
15977
15978
15979/**
15980 * Interface for HM and EM to read from a CRx register.
15981 *
15982 * @returns Strict VBox status code.
15983 * @param pVCpu The cross context virtual CPU structure.
15984 * @param cbInstr The instruction length in bytes.
15985 * @param iGReg The general purpose register number (destination).
15986 * @param iCrReg The control register number (source).
15987 *
15988 * @remarks In ring-0 not all of the state needs to be synced in.
15989 */
15990VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15991{
15992 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15993 Assert(iCrReg < 16);
15994 Assert(iGReg < 16);
15995
15996 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15997 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15998 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15999}
16000
16001
16002/**
16003 * Interface for HM and EM to clear the CR0[TS] bit.
16004 *
16005 * @returns Strict VBox status code.
16006 * @param pVCpu The cross context virtual CPU structure.
16007 * @param cbInstr The instruction length in bytes.
16008 *
16009 * @remarks In ring-0 not all of the state needs to be synced in.
16010 */
16011VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
16012{
16013 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
16014
16015 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16016 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
16017 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16018}
16019
16020
16021/**
16022 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
16023 *
16024 * @returns Strict VBox status code.
16025 * @param pVCpu The cross context virtual CPU structure.
16026 * @param cbInstr The instruction length in bytes.
16027 * @param uValue The value to load into CR0.
16028 *
16029 * @remarks In ring-0 not all of the state needs to be synced in.
16030 */
16031VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
16032{
16033 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16034
16035 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16036 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
16037 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16038}
16039
16040
16041/**
16042 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
16043 *
16044 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
16045 *
16046 * @returns Strict VBox status code.
16047 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16048 * @param cbInstr The instruction length in bytes.
16049 * @remarks In ring-0 not all of the state needs to be synced in.
16050 * @thread EMT(pVCpu)
16051 */
16052VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
16053{
16054 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16055
16056 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16057 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
16058 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16059}
16060
16061
16062/**
16063 * Interface for HM and EM to emulate the INVLPG instruction.
16064 *
16065 * @param pVCpu The cross context virtual CPU structure.
16066 * @param cbInstr The instruction length in bytes.
16067 * @param GCPtrPage The effective address of the page to invalidate.
16068 *
16069 * @remarks In ring-0 not all of the state needs to be synced in.
16070 */
16071VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
16072{
16073 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16074
16075 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16076 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
16077 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16078}
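/*
 * Usage sketch (illustrative only): the IEMExecDecoded* helpers are intended for
 * HM/EM exit handlers that already know which instruction caused the exit.  The
 * cbInstr and GCPtrPage values come from the exit information and are assumptions
 * here; the helper performs iemInitExec/iemUninitExec and the status fiddling
 * itself, so the result can be returned directly.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage);
 */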
16079
16080
16081/**
16082 * Interface for HM and EM to emulate the INVPCID instruction.
16083 *
16084 * @param pVCpu The cross context virtual CPU structure.
16085 * @param cbInstr The instruction length in bytes.
16086 * @param uType The invalidation type.
16087 * @param GCPtrInvpcidDesc The effective address of the INVPCID descriptor.
16088 *
16089 * @remarks In ring-0 not all of the state needs to be synced in.
16090 */
16091VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPU pVCpu, uint8_t cbInstr, uint8_t uType, RTGCPTR GCPtrInvpcidDesc)
16092{
16093 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
16094
16095 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16096 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_invpcid, uType, GCPtrInvpcidDesc);
16097 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16098}
16099
16100
16101/**
16102 * Checks if IEM is in the process of delivering an event (interrupt or
16103 * exception).
16104 *
16105 * @returns true if we're in the process of raising an interrupt or exception,
16106 * false otherwise.
16107 * @param pVCpu The cross context virtual CPU structure.
16108 * @param puVector Where to store the vector associated with the
16109 * currently delivered event, optional.
16110 * @param pfFlags Where to store the event delivery flags (see
16111 * IEM_XCPT_FLAGS_XXX), optional.
16112 * @param puErr Where to store the error code associated with the
16113 * event, optional.
16114 * @param puCr2 Where to store the CR2 associated with the event,
16115 * optional.
16116 * @remarks The caller should check the flags to determine if the error code and
16117 * CR2 are valid for the event.
16118 */
16119VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
16120{
16121 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
16122 if (fRaisingXcpt)
16123 {
16124 if (puVector)
16125 *puVector = pVCpu->iem.s.uCurXcpt;
16126 if (pfFlags)
16127 *pfFlags = pVCpu->iem.s.fCurXcpt;
16128 if (puErr)
16129 *puErr = pVCpu->iem.s.uCurXcptErr;
16130 if (puCr2)
16131 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
16132 }
16133 return fRaisingXcpt;
16134}
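/*
 * Usage sketch (illustrative only): checking whether a fault happened while IEM
 * was already delivering an event, and which of the optional outputs are valid.
 * How a caller acts on this information is an assumption.
 *
 *      uint8_t  uVector;
 *      uint32_t fFlags, uErr;
 *      uint64_t uCr2;
 *      if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *      {
 *          bool const fErrValid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR);
 *          bool const fCr2Valid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_CR2);
 *          ...
 *      }
 */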
16135
16136#ifdef VBOX_WITH_NESTED_HWVIRT
16137/**
16138 * Interface for HM and EM to emulate the CLGI instruction.
16139 *
16140 * @returns Strict VBox status code.
16141 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16142 * @param cbInstr The instruction length in bytes.
16143 * @thread EMT(pVCpu)
16144 */
16145VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
16146{
16147 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16148
16149 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16150 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
16151 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16152}
16153
16154
16155/**
16156 * Interface for HM and EM to emulate the STGI instruction.
16157 *
16158 * @returns Strict VBox status code.
16159 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16160 * @param cbInstr The instruction length in bytes.
16161 * @thread EMT(pVCpu)
16162 */
16163VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
16164{
16165 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16166
16167 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16168 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
16169 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16170}
16171
16172
16173/**
16174 * Interface for HM and EM to emulate the VMLOAD instruction.
16175 *
16176 * @returns Strict VBox status code.
16177 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16178 * @param cbInstr The instruction length in bytes.
16179 * @thread EMT(pVCpu)
16180 */
16181VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
16182{
16183 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16184
16185 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16186 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
16187 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16188}
16189
16190
16191/**
16192 * Interface for HM and EM to emulate the VMSAVE instruction.
16193 *
16194 * @returns Strict VBox status code.
16195 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16196 * @param cbInstr The instruction length in bytes.
16197 * @thread EMT(pVCpu)
16198 */
16199VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
16200{
16201 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16202
16203 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16204 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
16205 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16206}
16207
16208
16209/**
16210 * Interface for HM and EM to emulate the INVLPGA instruction.
16211 *
16212 * @returns Strict VBox status code.
16213 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16214 * @param cbInstr The instruction length in bytes.
16215 * @thread EMT(pVCpu)
16216 */
16217VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
16218{
16219 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16220
16221 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16222 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
16223 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16224}
16225
16226
16227/**
16228 * Interface for HM and EM to emulate the VMRUN instruction.
16229 *
16230 * @returns Strict VBox status code.
16231 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16232 * @param cbInstr The instruction length in bytes.
16233 * @thread EMT(pVCpu)
16234 */
16235VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
16236{
16237 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16238
16239 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16240 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
16241 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16242}
16243
16244
16245/**
16246 * Interface for HM and EM to emulate \#VMEXIT.
16247 *
16248 * @returns Strict VBox status code.
16249 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16250 * @param uExitCode The exit code.
16251 * @param uExitInfo1 The exit info. 1 field.
16252 * @param uExitInfo2 The exit info. 2 field.
16253 * @thread EMT(pVCpu)
16254 */
16255VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
16256{
16257 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, IEM_GET_CTX(pVCpu), uExitCode, uExitInfo1, uExitInfo2);
16258 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16259}
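/*
 * Usage sketch (illustrative only): an HM SVM exit handler that decides an
 * intercept must be reflected to the nested hypervisor could do so as shown;
 * uExitCode, uExitInfo1 and uExitInfo2 are whatever the intercept dictates and
 * are assumptions here.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
 */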
16260#endif /* VBOX_WITH_NESTED_HWVIRT */
16261
16262#ifdef IN_RING3
16263
16264/**
16265 * Handles the unlikely and probably fatal merge cases.
16266 *
16267 * @returns Merged status code.
16268 * @param rcStrict Current EM status code.
16269 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16270 * with @a rcStrict.
16271 * @param iMemMap The memory mapping index. For error reporting only.
16272 * @param pVCpu The cross context virtual CPU structure of the calling
16273 * thread, for error reporting only.
16274 */
16275DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
16276 unsigned iMemMap, PVMCPU pVCpu)
16277{
16278 if (RT_FAILURE_NP(rcStrict))
16279 return rcStrict;
16280
16281 if (RT_FAILURE_NP(rcStrictCommit))
16282 return rcStrictCommit;
16283
16284 if (rcStrict == rcStrictCommit)
16285 return rcStrictCommit;
16286
16287 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
16288 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
16289 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
16290 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
16291 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
16292 return VERR_IOM_FF_STATUS_IPE;
16293}
16294
16295
16296/**
16297 * Helper for IOMR3ProcessForceFlag.
16298 *
16299 * @returns Merged status code.
16300 * @param rcStrict Current EM status code.
16301 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16302 * with @a rcStrict.
16303 * @param iMemMap The memory mapping index. For error reporting only.
16304 * @param pVCpu The cross context virtual CPU structure of the calling
16305 * thread, for error reporting only.
16306 */
16307DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
16308{
16309 /* Simple. */
16310 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
16311 return rcStrictCommit;
16312
16313 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
16314 return rcStrict;
16315
16316 /* EM scheduling status codes. */
16317 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
16318 && rcStrict <= VINF_EM_LAST))
16319 {
16320 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
16321 && rcStrictCommit <= VINF_EM_LAST))
16322 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
16323 }
16324
16325 /* Unlikely */
16326 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
16327}
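/*
 * Merge examples derived from the rules above (illustrative only):
 *      rcStrict = VINF_SUCCESS,       rcStrictCommit = VINF_EM_RAW_TO_R3  -> VINF_EM_RAW_TO_R3
 *      rcStrict = VINF_EM_RESCHEDULE, rcStrictCommit = VINF_SUCCESS       -> VINF_EM_RESCHEDULE
 *      two EM scheduling codes                                            -> the numerically lower
 *                                                                            (more urgent) of the two
 *      anything else is handed to iemR3MergeStatusSlow above.
 */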
16328
16329
16330/**
16331 * Called by force-flag handling code when VMCPU_FF_IEM is set.
16332 *
16333 * @returns Merge between @a rcStrict and what the commit operation returned.
16334 * @param pVM The cross context VM structure.
16335 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16336 * @param rcStrict The status code returned by ring-0 or raw-mode.
16337 */
16338VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
16339{
16340 /*
16341 * Reset the pending commit.
16342 */
16343 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
16344 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
16345 ("%#x %#x %#x\n",
16346 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16347 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
16348
16349 /*
16350 * Commit the pending bounce buffers (usually just one).
16351 */
16352 unsigned cBufs = 0;
16353 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
16354 while (iMemMap-- > 0)
16355 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
16356 {
16357 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
16358 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
16359 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
16360
16361 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
16362 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
16363 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
16364
16365 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
16366 {
16367 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
16368 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16369 pbBuf,
16370 cbFirst,
16371 PGMACCESSORIGIN_IEM);
16372 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16373 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16374 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16375 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16376 }
16377
16378 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16379 {
16380 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16381 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16382 pbBuf + cbFirst,
16383 cbSecond,
16384 PGMACCESSORIGIN_IEM);
16385 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16386 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16387 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16388 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16389 }
16390 cBufs++;
16391 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16392 }
16393
16394 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16395 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16396 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16397 pVCpu->iem.s.cActiveMappings = 0;
16398 return rcStrict;
16399}
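/*
 * Usage sketch (illustrative only): ring-3 force-flag processing would typically
 * invoke this when VMCPU_FF_IEM is pending, merging the commit result into the
 * status it already has.  The surrounding context is an assumption.
 *
 *      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *          rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 */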
16400
16401#endif /* IN_RING3 */
16402