VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@68226

Last change on this file since 68226 was 67786, checked in by vboxsync, 7 years ago

IEM: Also clear TF, AC as documented by AMD/Intel when dispatching real-mode interrupts.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 634.9 KB
1/* $Id: IEMAll.cpp 67786 2017-07-05 08:48:17Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
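/*
 * Illustrative only (not part of the original source): roughly how the levels
 * listed above map onto the VBox logging macros inside IEM code; the messages
 * themselves are made up.
 *
 *     Log(("IEM: raising #GP(0) at %04x:%RGv\n", uCs, GCPtrPC));   // level 1: exceptions/major events
 *     LogFlow(("IEMExecOne: enter\n"));                            // flow: enter/exit info
 *     Log4(("decode: %04x:%RGv xor eax, eax\n", uCs, GCPtrPC));    // level 4: mnemonics w/ EIP
 *     Log8(("IEM WR %RGv LB %#x\n", GCPtrMem, cbMem));             // level 8: memory writes
 */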
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#ifdef VBOX_WITH_NESTED_HWVIRT
105# include <VBox/vmm/em.h>
106# include <VBox/vmm/hm_svm.h>
107#endif
108#include <VBox/vmm/tm.h>
109#include <VBox/vmm/dbgf.h>
110#include <VBox/vmm/dbgftrace.h>
111#ifdef VBOX_WITH_RAW_MODE_NOT_R0
112# include <VBox/vmm/patm.h>
113# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
114# include <VBox/vmm/csam.h>
115# endif
116#endif
117#include "IEMInternal.h"
118#ifdef IEM_VERIFICATION_MODE_FULL
119# include <VBox/vmm/rem.h>
120# include <VBox/vmm/mm.h>
121#endif
122#include <VBox/vmm/vm.h>
123#include <VBox/log.h>
124#include <VBox/err.h>
125#include <VBox/param.h>
126#include <VBox/dis.h>
127#include <VBox/disopcode.h>
128#include <iprt/assert.h>
129#include <iprt/string.h>
130#include <iprt/x86.h>
131
132
133/*********************************************************************************************************************************
134* Structures and Typedefs *
135*********************************************************************************************************************************/
136/** @typedef PFNIEMOP
137 * Pointer to an opcode decoder function.
138 */
139
140/** @def FNIEMOP_DEF
141 * Define an opcode decoder function.
142 *
143 * We're using macros for this so that adding and removing parameters as well as
144 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
145 *
146 * @param a_Name The function name.
147 */
148
149/** @typedef PFNIEMOPRM
150 * Pointer to an opcode decoder function with RM byte.
151 */
152
153/** @def FNIEMOPRM_DEF
154 * Define an opcode decoder function with RM byte.
155 *
156 * We're using macros for this so that adding and removing parameters as well as
157 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
158 *
159 * @param a_Name The function name.
160 */
161
162#if defined(__GNUC__) && defined(RT_ARCH_X86)
163typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
164typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
165# define FNIEMOP_DEF(a_Name) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
167# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
168 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
169# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
170 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
171
172#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
173typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
174typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
175# define FNIEMOP_DEF(a_Name) \
176 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
177# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
178 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
179# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
180 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
181
182#elif defined(__GNUC__)
183typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
184typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
185# define FNIEMOP_DEF(a_Name) \
186 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
187# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
188 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
189# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
190 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
191
192#else
193typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
194typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
195# define FNIEMOP_DEF(a_Name) \
196 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
197# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
198 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
199# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
200 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
201
202#endif
203#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
204
205
206/**
207 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
208 */
209typedef union IEMSELDESC
210{
211 /** The legacy view. */
212 X86DESC Legacy;
213 /** The long mode view. */
214 X86DESC64 Long;
215} IEMSELDESC;
216/** Pointer to a selector descriptor table entry. */
217typedef IEMSELDESC *PIEMSELDESC;
218
219/**
220 * CPU exception classes.
221 */
222typedef enum IEMXCPTCLASS
223{
224 IEMXCPTCLASS_BENIGN,
225 IEMXCPTCLASS_CONTRIBUTORY,
226 IEMXCPTCLASS_PAGE_FAULT
227} IEMXCPTCLASS;
228
229
230/*********************************************************************************************************************************
231* Defined Constants And Macros *
232*********************************************************************************************************************************/
233/** @def IEM_WITH_SETJMP
234 * Enables alternative status code handling using setjmps.
235 *
236 * This adds a bit of expense via the setjmp() call since it saves all the
237 * non-volatile registers. However, it eliminates return code checks and allows
238 * for more optimal return value passing (return regs instead of stack buffer).
239 */
240#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
241# define IEM_WITH_SETJMP
242#endif
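/*
 * Conceptual sketch of the two styles described above (illustrative only; the
 * *Jmp helper name is assumed):
 *
 *     // Status-code style: every call site checks the VBOXSTRICTRC.
 *     uint32_t u32Value;
 *     VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &u32Value, iSegReg, GCPtrMem);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *
 *     // Setjmp style: the helper longjmp's out on failure, so the fetched value
 *     // comes back directly and errors are caught by a single setjmp at the top
 *     // of the instruction loop.
 *     uint32_t u32Value = iemMemFetchDataU32Jmp(pVCpu, iSegReg, GCPtrMem);
 */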
243
244/** Temporary hack to disable the double execution. Will be removed in favor
245 * of a dedicated execution mode in EM. */
246//#define IEM_VERIFICATION_MODE_NO_REM
247
248/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
249 * due to GCC lacking knowledge about the value range of a switch. */
250#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
251
252/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
253#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
254
255/**
256 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
257 * occasion.
258 */
259#ifdef LOG_ENABLED
260# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
261 do { \
262 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
263 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
264 } while (0)
265#else
266# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
267 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
268#endif
269
270/**
271 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
272 * occasion using the supplied logger statement.
273 *
274 * @param a_LoggerArgs What to log on failure.
275 */
276#ifdef LOG_ENABLED
277# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
278 do { \
279 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
280 /*LogFunc(a_LoggerArgs);*/ \
281 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
282 } while (0)
283#else
284# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
285 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
286#endif
287
288/**
289 * Call an opcode decoder function.
290 *
291 * We're using macros for this so that adding and removing parameters can be
292 * done as we please. See FNIEMOP_DEF.
293 */
294#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
295
296/**
297 * Call a common opcode decoder function taking one extra argument.
298 *
299 * We're using macros for this so that adding and removing parameters can be
300 * done as we please. See FNIEMOP_DEF_1.
301 */
302#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
303
304/**
305 * Call a common opcode decoder function taking two extra arguments.
306 *
307 * We're using macros for this so that adding and removing parameters can be
308 * done as we please. See FNIEMOP_DEF_2.
309 */
310#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
311
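/*
 * Illustrative sketch (not from this file) of how the FNIEMOP_DEF / FNIEMOP_CALL
 * macro pairs are meant to be used; the opcode handler name is hypothetical.
 *
 *     FNIEMOP_DEF(iemOp_example)
 *     {
 *         IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
 *     }
 *
 *     // Dispatching via the one-byte opcode table declared further down:
 *     return FNIEMOP_CALL(g_apfnOneByteMap[b]);
 */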
312/**
313 * Check if we're currently executing in real or virtual 8086 mode.
314 *
315 * @returns @c true if it is, @c false if not.
316 * @param a_pVCpu The IEM state of the current CPU.
317 */
318#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
319
320/**
321 * Check if we're currently executing in virtual 8086 mode.
322 *
323 * @returns @c true if it is, @c false if not.
324 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
325 */
326#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
327
328/**
329 * Check if we're currently executing in long mode.
330 *
331 * @returns @c true if it is, @c false if not.
332 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
333 */
334#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
335
336/**
337 * Check if we're currently executing in real mode.
338 *
339 * @returns @c true if it is, @c false if not.
340 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
341 */
342#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
343
344/**
345 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
346 * @returns PCCPUMFEATURES
347 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
348 */
349#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
350
351/**
352 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
353 * @returns PCCPUMFEATURES
354 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
355 */
356#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
357
358/**
359 * Evaluates to true if we're presenting an Intel CPU to the guest.
360 */
361#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
362
363/**
364 * Evaluates to true if we're presenting an AMD CPU to the guest.
365 */
366#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
367
368/**
369 * Check if the address is canonical.
370 */
371#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
372
373/**
374 * Gets the effective VEX.VVVV value.
375 *
376 * The 4th bit is ignored when not in 64-bit code.
377 * @returns effective V-register value.
378 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
379 */
380#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
381 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
382
383/** @def IEM_USE_UNALIGNED_DATA_ACCESS
384 * Use unaligned accesses instead of elaborate byte assembly. */
385#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
386# define IEM_USE_UNALIGNED_DATA_ACCESS
387#endif
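/*
 * Sketch of what the flag above toggles (illustrative only): with unaligned
 * access enabled a 32-bit value is read straight from the mapped buffer,
 * otherwise it would be assembled byte by byte.
 *
 *     #ifdef IEM_USE_UNALIGNED_DATA_ACCESS
 *         uint32_t const u32 = *(uint32_t const *)pbSrc;
 *     #else
 *         uint32_t const u32 = RT_MAKE_U32_FROM_U8(pbSrc[0], pbSrc[1], pbSrc[2], pbSrc[3]);
 *     #endif
 */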
388
389#ifdef VBOX_WITH_NESTED_HWVIRT
390/**
391 * Check the common SVM instruction preconditions.
392 */
393# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
394 do { \
395 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
396 { \
397 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
398 return iemRaiseUndefinedOpcode(pVCpu); \
399 } \
400 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
401 { \
402 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
403 return iemRaiseUndefinedOpcode(pVCpu); \
404 } \
405 if (pVCpu->iem.s.uCpl != 0) \
406 { \
407 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
408 return iemRaiseGeneralProtectionFault0(pVCpu); \
409 } \
410 } while (0)
411
412/**
413 * Check if SVM is enabled.
414 */
415# define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
416
417/**
418 * Check if an SVM control/instruction intercept is set.
419 */
420# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(IEM_GET_CTX(a_pVCpu), (a_Intercept)))
421
422/**
423 * Check if an SVM read CRx intercept is set.
424 */
425# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
426
427/**
428 * Check if an SVM write CRx intercept is set.
429 */
430# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
431
432/**
433 * Check if an SVM read DRx intercept is set.
434 */
435# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
436
437/**
438 * Check if an SVM write DRx intercept is set.
439 */
440# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
441
442/**
443 * Check if an SVM exception intercept is set.
444 */
445# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (CPUMIsGuestSvmXcptInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uVector)))
446
447/**
448 * Invokes the SVM \#VMEXIT handler for the nested-guest.
449 */
450# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
451 do \
452 { \
453 return iemSvmVmexit((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); \
454 } while (0)
455
456/**
457 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
458 * corresponding decode assist information.
459 */
460# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
461 do \
462 { \
463 uint64_t uExitInfo1; \
464 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssist \
465 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
466 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
467 else \
468 uExitInfo1 = 0; \
469 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
470 } while (0)
471
472#else
473# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) do { } while (0)
474# define IEM_IS_SVM_ENABLED(a_pVCpu) (false)
475# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
476# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
477# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
478# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
479# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
480# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
481# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
482# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
483
484#endif /* VBOX_WITH_NESTED_HWVIRT */
485
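/*
 * Illustrative use of the SVM helper macros above (hypothetical VMRUN handler
 * fragment, not copied from this file; the SVM_* constants come from hm_svm.h):
 *
 *     IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmrun);
 *     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMRUN))
 *         IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_VMRUN, 0, 0);   // uExitInfo1, uExitInfo2 both zero
 */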
486
487/*********************************************************************************************************************************
488* Global Variables *
489*********************************************************************************************************************************/
490extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
491
492
493/** Function table for the ADD instruction. */
494IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
495{
496 iemAImpl_add_u8, iemAImpl_add_u8_locked,
497 iemAImpl_add_u16, iemAImpl_add_u16_locked,
498 iemAImpl_add_u32, iemAImpl_add_u32_locked,
499 iemAImpl_add_u64, iemAImpl_add_u64_locked
500};
501
502/** Function table for the ADC instruction. */
503IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
504{
505 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
506 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
507 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
508 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
509};
510
511/** Function table for the SUB instruction. */
512IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
513{
514 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
515 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
516 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
517 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
518};
519
520/** Function table for the SBB instruction. */
521IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
522{
523 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
524 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
525 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
526 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
527};
528
529/** Function table for the OR instruction. */
530IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
531{
532 iemAImpl_or_u8, iemAImpl_or_u8_locked,
533 iemAImpl_or_u16, iemAImpl_or_u16_locked,
534 iemAImpl_or_u32, iemAImpl_or_u32_locked,
535 iemAImpl_or_u64, iemAImpl_or_u64_locked
536};
537
538/** Function table for the XOR instruction. */
539IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
540{
541 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
542 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
543 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
544 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
545};
546
547/** Function table for the AND instruction. */
548IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
549{
550 iemAImpl_and_u8, iemAImpl_and_u8_locked,
551 iemAImpl_and_u16, iemAImpl_and_u16_locked,
552 iemAImpl_and_u32, iemAImpl_and_u32_locked,
553 iemAImpl_and_u64, iemAImpl_and_u64_locked
554};
555
556/** Function table for the CMP instruction.
557 * @remarks Making operand order ASSUMPTIONS.
558 */
559IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
560{
561 iemAImpl_cmp_u8, NULL,
562 iemAImpl_cmp_u16, NULL,
563 iemAImpl_cmp_u32, NULL,
564 iemAImpl_cmp_u64, NULL
565};
566
567/** Function table for the TEST instruction.
568 * @remarks Making operand order ASSUMPTIONS.
569 */
570IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
571{
572 iemAImpl_test_u8, NULL,
573 iemAImpl_test_u16, NULL,
574 iemAImpl_test_u32, NULL,
575 iemAImpl_test_u64, NULL
576};
577
578/** Function table for the BT instruction. */
579IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
580{
581 NULL, NULL,
582 iemAImpl_bt_u16, NULL,
583 iemAImpl_bt_u32, NULL,
584 iemAImpl_bt_u64, NULL
585};
586
587/** Function table for the BTC instruction. */
588IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
589{
590 NULL, NULL,
591 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
592 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
593 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
594};
595
596/** Function table for the BTR instruction. */
597IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
598{
599 NULL, NULL,
600 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
601 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
602 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
603};
604
605/** Function table for the BTS instruction. */
606IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
607{
608 NULL, NULL,
609 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
610 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
611 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
612};
613
614/** Function table for the BSF instruction. */
615IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
616{
617 NULL, NULL,
618 iemAImpl_bsf_u16, NULL,
619 iemAImpl_bsf_u32, NULL,
620 iemAImpl_bsf_u64, NULL
621};
622
623/** Function table for the BSR instruction. */
624IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
625{
626 NULL, NULL,
627 iemAImpl_bsr_u16, NULL,
628 iemAImpl_bsr_u32, NULL,
629 iemAImpl_bsr_u64, NULL
630};
631
632/** Function table for the IMUL instruction. */
633IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
634{
635 NULL, NULL,
636 iemAImpl_imul_two_u16, NULL,
637 iemAImpl_imul_two_u32, NULL,
638 iemAImpl_imul_two_u64, NULL
639};
640
641/** Group 1 /r lookup table. */
642IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
643{
644 &g_iemAImpl_add,
645 &g_iemAImpl_or,
646 &g_iemAImpl_adc,
647 &g_iemAImpl_sbb,
648 &g_iemAImpl_and,
649 &g_iemAImpl_sub,
650 &g_iemAImpl_xor,
651 &g_iemAImpl_cmp
652};
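/*
 * Sketch of how the group 1 table above is meant to be consumed by the
 * 0x80..0x83 opcode decoders (illustrative, not copied from the decoder files):
 * ModRM.reg selects the operation, and the chosen entry supplies the normal and
 * LOCK-prefixed worker for each operand size.
 *
 *     PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_MASK];
 *     // pImpl->pfnNormalU8, pImpl->pfnLockedU8, etc. are then invoked by the IEM_MC_* blocks.
 */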
653
654/** Function table for the INC instruction. */
655IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
656{
657 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
658 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
659 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
660 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
661};
662
663/** Function table for the DEC instruction. */
664IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
665{
666 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
667 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
668 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
669 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
670};
671
672/** Function table for the NEG instruction. */
673IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
674{
675 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
676 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
677 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
678 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
679};
680
681/** Function table for the NOT instruction. */
682IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
683{
684 iemAImpl_not_u8, iemAImpl_not_u8_locked,
685 iemAImpl_not_u16, iemAImpl_not_u16_locked,
686 iemAImpl_not_u32, iemAImpl_not_u32_locked,
687 iemAImpl_not_u64, iemAImpl_not_u64_locked
688};
689
690
691/** Function table for the ROL instruction. */
692IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
693{
694 iemAImpl_rol_u8,
695 iemAImpl_rol_u16,
696 iemAImpl_rol_u32,
697 iemAImpl_rol_u64
698};
699
700/** Function table for the ROR instruction. */
701IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
702{
703 iemAImpl_ror_u8,
704 iemAImpl_ror_u16,
705 iemAImpl_ror_u32,
706 iemAImpl_ror_u64
707};
708
709/** Function table for the RCL instruction. */
710IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
711{
712 iemAImpl_rcl_u8,
713 iemAImpl_rcl_u16,
714 iemAImpl_rcl_u32,
715 iemAImpl_rcl_u64
716};
717
718/** Function table for the RCR instruction. */
719IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
720{
721 iemAImpl_rcr_u8,
722 iemAImpl_rcr_u16,
723 iemAImpl_rcr_u32,
724 iemAImpl_rcr_u64
725};
726
727/** Function table for the SHL instruction. */
728IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
729{
730 iemAImpl_shl_u8,
731 iemAImpl_shl_u16,
732 iemAImpl_shl_u32,
733 iemAImpl_shl_u64
734};
735
736/** Function table for the SHR instruction. */
737IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
738{
739 iemAImpl_shr_u8,
740 iemAImpl_shr_u16,
741 iemAImpl_shr_u32,
742 iemAImpl_shr_u64
743};
744
745/** Function table for the SAR instruction. */
746IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
747{
748 iemAImpl_sar_u8,
749 iemAImpl_sar_u16,
750 iemAImpl_sar_u32,
751 iemAImpl_sar_u64
752};
753
754
755/** Function table for the MUL instruction. */
756IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
757{
758 iemAImpl_mul_u8,
759 iemAImpl_mul_u16,
760 iemAImpl_mul_u32,
761 iemAImpl_mul_u64
762};
763
764/** Function table for the IMUL instruction working implicitly on rAX. */
765IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
766{
767 iemAImpl_imul_u8,
768 iemAImpl_imul_u16,
769 iemAImpl_imul_u32,
770 iemAImpl_imul_u64
771};
772
773/** Function table for the DIV instruction. */
774IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
775{
776 iemAImpl_div_u8,
777 iemAImpl_div_u16,
778 iemAImpl_div_u32,
779 iemAImpl_div_u64
780};
781
782/** Function table for the IDIV instruction. */
783IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
784{
785 iemAImpl_idiv_u8,
786 iemAImpl_idiv_u16,
787 iemAImpl_idiv_u32,
788 iemAImpl_idiv_u64
789};
790
791/** Function table for the SHLD instruction */
792IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
793{
794 iemAImpl_shld_u16,
795 iemAImpl_shld_u32,
796 iemAImpl_shld_u64,
797};
798
799/** Function table for the SHRD instruction */
800IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
801{
802 iemAImpl_shrd_u16,
803 iemAImpl_shrd_u32,
804 iemAImpl_shrd_u64,
805};
806
807
808/** Function table for the PUNPCKLBW instruction */
809IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
810/** Function table for the PUNPCKLWD instruction */
811IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
812/** Function table for the PUNPCKLDQ instruction */
813IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
814/** Function table for the PUNPCKLQDQ instruction */
815IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
816
817/** Function table for the PUNPCKHBW instruction */
818IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
819/** Function table for the PUNPCKHWD instruction */
820IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
821/** Function table for the PUNPCKHDQ instruction */
822IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
823/** Function table for the PUNPCKHQDQ instruction */
824IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
825
826/** Function table for the PXOR instruction */
827IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
828/** Function table for the PCMPEQB instruction */
829IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
830/** Function table for the PCMPEQW instruction */
831IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
832/** Function table for the PCMPEQD instruction */
833IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
834
835
836#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
837/** What IEM just wrote. */
838uint8_t g_abIemWrote[256];
839/** How much IEM just wrote. */
840size_t g_cbIemWrote;
841#endif
842
843
844/*********************************************************************************************************************************
845* Internal Functions *
846*********************************************************************************************************************************/
847IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
848IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
849IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
850IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
851/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
852IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
853IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
854IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
855IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
856IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
857IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
858IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
859IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
860IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
861IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
862IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
863IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
864#ifdef IEM_WITH_SETJMP
865DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
866DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
867DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
868DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
869DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
870#endif
871
872IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
873IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
874IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
875IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
876IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
877IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
878IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
879IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
880IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
881IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
882IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
883IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
884IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
885IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
886IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
887IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
888
889#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
890IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
891#endif
892IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
893IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
894
895#ifdef VBOX_WITH_NESTED_HWVIRT
896IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uExitCode, uint64_t uExitInfo1,
897 uint64_t uExitInfo2);
898IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t u8Vector, uint32_t fFlags,
899 uint32_t uErr, uint64_t uCr2);
900#endif
901
902/**
903 * Sets the pass up status.
904 *
905 * @returns VINF_SUCCESS.
906 * @param pVCpu The cross context virtual CPU structure of the
907 * calling thread.
908 * @param rcPassUp The pass up status. Must be informational.
909 * VINF_SUCCESS is not allowed.
910 */
911IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
912{
913 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
914
915 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
916 if (rcOldPassUp == VINF_SUCCESS)
917 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
918 /* If both are EM scheduling codes, use EM priority rules. */
919 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
920 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
921 {
922 if (rcPassUp < rcOldPassUp)
923 {
924 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
925 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
926 }
927 else
928 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
929 }
930 /* Override EM scheduling with specific status code. */
931 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
932 {
933 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
934 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
935 }
936 /* Don't override specific status code, first come first served. */
937 else
938 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
939 return VINF_SUCCESS;
940}
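/*
 * Typical call pattern (sketch): an informational status from a physical
 * read/write is remembered here and flattened to VINF_SUCCESS so execution can
 * continue, e.g.:
 *
 *     if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *         rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
 */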
941
942
943/**
944 * Calculates the CPU mode.
945 *
946 * This is mainly for updating IEMCPU::enmCpuMode.
947 *
948 * @returns CPU mode.
949 * @param pCtx The register context for the CPU.
950 */
951DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
952{
953 if (CPUMIsGuestIn64BitCodeEx(pCtx))
954 return IEMMODE_64BIT;
955 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
956 return IEMMODE_32BIT;
957 return IEMMODE_16BIT;
958}
959
960
961/**
962 * Initializes the execution state.
963 *
964 * @param pVCpu The cross context virtual CPU structure of the
965 * calling thread.
966 * @param fBypassHandlers Whether to bypass access handlers.
967 *
968 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
969 * side-effects in strict builds.
970 */
971DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
972{
973 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
974
975 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
976
977#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
978 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
979 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
980 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
981 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
982 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
983 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
984 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
985 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
986#endif
987
988#ifdef VBOX_WITH_RAW_MODE_NOT_R0
989 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
990#endif
991 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
992 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
993#ifdef VBOX_STRICT
994 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
995 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
996 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
997 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
998 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
999 pVCpu->iem.s.uRexReg = 127;
1000 pVCpu->iem.s.uRexB = 127;
1001 pVCpu->iem.s.uRexIndex = 127;
1002 pVCpu->iem.s.iEffSeg = 127;
1003 pVCpu->iem.s.idxPrefix = 127;
1004 pVCpu->iem.s.uVex3rdReg = 127;
1005 pVCpu->iem.s.uVexLength = 127;
1006 pVCpu->iem.s.fEvexStuff = 127;
1007 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1008# ifdef IEM_WITH_CODE_TLB
1009 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1010 pVCpu->iem.s.pbInstrBuf = NULL;
1011 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1012 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1013 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1014 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1015# else
1016 pVCpu->iem.s.offOpcode = 127;
1017 pVCpu->iem.s.cbOpcode = 127;
1018# endif
1019#endif
1020
1021 pVCpu->iem.s.cActiveMappings = 0;
1022 pVCpu->iem.s.iNextMapping = 0;
1023 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1024 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1025#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1026 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1027 && pCtx->cs.u64Base == 0
1028 && pCtx->cs.u32Limit == UINT32_MAX
1029 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1030 if (!pVCpu->iem.s.fInPatchCode)
1031 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1032#endif
1033
1034#ifdef IEM_VERIFICATION_MODE_FULL
1035 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
1036 pVCpu->iem.s.fNoRem = true;
1037#endif
1038}
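/*
 * Pairing sketch (see the @remarks above): callers are expected to bracket the
 * actual interpretation with the init/uninit calls, roughly:
 *
 *     iemInitExec(pVCpu, false);              // fBypassHandlers = false
 *     VBOXSTRICTRC rcStrict = ...;            // decode and execute
 *     iemUninitExec(pVCpu);
 */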
1039
1040#ifdef VBOX_WITH_NESTED_HWVIRT
1041/**
1042 * Performs a minimal reinitialization of the execution state.
1043 *
1044 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1045 * 'world-switch' type operations on the CPU. Currently only nested
1046 * hardware-virtualization uses it.
1047 *
1048 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1049 */
1050IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1051{
1052 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1053 IEMMODE const enmMode = iemCalcCpuMode(pCtx);
1054 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1055
1056 pVCpu->iem.s.uCpl = uCpl;
1057 pVCpu->iem.s.enmCpuMode = enmMode;
1058 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1059 pVCpu->iem.s.enmEffAddrMode = enmMode;
1060 if (enmMode != IEMMODE_64BIT)
1061 {
1062 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1063 pVCpu->iem.s.enmEffOpSize = enmMode;
1064 }
1065 else
1066 {
1067 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1068 pVCpu->iem.s.enmEffOpSize = enmMode;
1069 }
1070 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1071#ifndef IEM_WITH_CODE_TLB
1072 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1073 pVCpu->iem.s.offOpcode = 0;
1074 pVCpu->iem.s.cbOpcode = 0;
1075#endif
1076}
1077#endif
1078
1079/**
1080 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1081 *
1082 * @param pVCpu The cross context virtual CPU structure of the
1083 * calling thread.
1084 */
1085DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1086{
1087 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1088#ifdef IEM_VERIFICATION_MODE_FULL
1089 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
1090#endif
1091#ifdef VBOX_STRICT
1092# ifdef IEM_WITH_CODE_TLB
1093 NOREF(pVCpu);
1094# else
1095 pVCpu->iem.s.cbOpcode = 0;
1096# endif
1097#else
1098 NOREF(pVCpu);
1099#endif
1100}
1101
1102
1103/**
1104 * Initializes the decoder state.
1105 *
1106 * iemReInitDecoder is mostly a copy of this function.
1107 *
1108 * @param pVCpu The cross context virtual CPU structure of the
1109 * calling thread.
1110 * @param fBypassHandlers Whether to bypass access handlers.
1111 */
1112DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1113{
1114 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1115
1116 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1117
1118#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1119 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1120 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1121 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1122 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1123 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1124 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1125 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1126 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1127#endif
1128
1129#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1130 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1131#endif
1132 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1133#ifdef IEM_VERIFICATION_MODE_FULL
1134 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1135 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1136#endif
1137 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1138 pVCpu->iem.s.enmCpuMode = enmMode;
1139 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1140 pVCpu->iem.s.enmEffAddrMode = enmMode;
1141 if (enmMode != IEMMODE_64BIT)
1142 {
1143 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1144 pVCpu->iem.s.enmEffOpSize = enmMode;
1145 }
1146 else
1147 {
1148 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1149 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1150 }
1151 pVCpu->iem.s.fPrefixes = 0;
1152 pVCpu->iem.s.uRexReg = 0;
1153 pVCpu->iem.s.uRexB = 0;
1154 pVCpu->iem.s.uRexIndex = 0;
1155 pVCpu->iem.s.idxPrefix = 0;
1156 pVCpu->iem.s.uVex3rdReg = 0;
1157 pVCpu->iem.s.uVexLength = 0;
1158 pVCpu->iem.s.fEvexStuff = 0;
1159 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1160#ifdef IEM_WITH_CODE_TLB
1161 pVCpu->iem.s.pbInstrBuf = NULL;
1162 pVCpu->iem.s.offInstrNextByte = 0;
1163 pVCpu->iem.s.offCurInstrStart = 0;
1164# ifdef VBOX_STRICT
1165 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1166 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1167 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1168# endif
1169#else
1170 pVCpu->iem.s.offOpcode = 0;
1171 pVCpu->iem.s.cbOpcode = 0;
1172#endif
1173 pVCpu->iem.s.cActiveMappings = 0;
1174 pVCpu->iem.s.iNextMapping = 0;
1175 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1176 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1177#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1178 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1179 && pCtx->cs.u64Base == 0
1180 && pCtx->cs.u32Limit == UINT32_MAX
1181 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1182 if (!pVCpu->iem.s.fInPatchCode)
1183 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1184#endif
1185
1186#ifdef DBGFTRACE_ENABLED
1187 switch (enmMode)
1188 {
1189 case IEMMODE_64BIT:
1190 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1191 break;
1192 case IEMMODE_32BIT:
1193 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1194 break;
1195 case IEMMODE_16BIT:
1196 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1197 break;
1198 }
1199#endif
1200}
1201
1202
1203/**
1204 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1205 *
1206 * This is mostly a copy of iemInitDecoder.
1207 *
1208 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1209 */
1210DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1211{
1212 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1213
1214 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1215
1216#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1217 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1218 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1219 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1220 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1221 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1222 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1223 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1224 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1225#endif
1226
1227 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1228#ifdef IEM_VERIFICATION_MODE_FULL
1229 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1230 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1231#endif
1232 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1233 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1234 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1235 pVCpu->iem.s.enmEffAddrMode = enmMode;
1236 if (enmMode != IEMMODE_64BIT)
1237 {
1238 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1239 pVCpu->iem.s.enmEffOpSize = enmMode;
1240 }
1241 else
1242 {
1243 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1244 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1245 }
1246 pVCpu->iem.s.fPrefixes = 0;
1247 pVCpu->iem.s.uRexReg = 0;
1248 pVCpu->iem.s.uRexB = 0;
1249 pVCpu->iem.s.uRexIndex = 0;
1250 pVCpu->iem.s.idxPrefix = 0;
1251 pVCpu->iem.s.uVex3rdReg = 0;
1252 pVCpu->iem.s.uVexLength = 0;
1253 pVCpu->iem.s.fEvexStuff = 0;
1254 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1255#ifdef IEM_WITH_CODE_TLB
1256 if (pVCpu->iem.s.pbInstrBuf)
1257 {
1258 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1259 - pVCpu->iem.s.uInstrBufPc;
1260 if (off < pVCpu->iem.s.cbInstrBufTotal)
1261 {
1262 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1263 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1264 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1265 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1266 else
1267 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1268 }
1269 else
1270 {
1271 pVCpu->iem.s.pbInstrBuf = NULL;
1272 pVCpu->iem.s.offInstrNextByte = 0;
1273 pVCpu->iem.s.offCurInstrStart = 0;
1274 pVCpu->iem.s.cbInstrBuf = 0;
1275 pVCpu->iem.s.cbInstrBufTotal = 0;
1276 }
1277 }
1278 else
1279 {
1280 pVCpu->iem.s.offInstrNextByte = 0;
1281 pVCpu->iem.s.offCurInstrStart = 0;
1282 pVCpu->iem.s.cbInstrBuf = 0;
1283 pVCpu->iem.s.cbInstrBufTotal = 0;
1284 }
1285#else
1286 pVCpu->iem.s.cbOpcode = 0;
1287 pVCpu->iem.s.offOpcode = 0;
1288#endif
1289 Assert(pVCpu->iem.s.cActiveMappings == 0);
1290 pVCpu->iem.s.iNextMapping = 0;
1291 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1292 Assert(pVCpu->iem.s.fBypassHandlers == false);
1293#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1294 if (!pVCpu->iem.s.fInPatchCode)
1295 { /* likely */ }
1296 else
1297 {
1298 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1299 && pCtx->cs.u64Base == 0
1300 && pCtx->cs.u32Limit == UINT32_MAX
1301 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1302 if (!pVCpu->iem.s.fInPatchCode)
1303 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1304 }
1305#endif
1306
1307#ifdef DBGFTRACE_ENABLED
1308 switch (enmMode)
1309 {
1310 case IEMMODE_64BIT:
1311 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1312 break;
1313 case IEMMODE_32BIT:
1314 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1315 break;
1316 case IEMMODE_16BIT:
1317 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1318 break;
1319 }
1320#endif
1321}
1322
1323
1324
1325/**
1326 * Prefetches opcodes when starting execution for the first time.
1327 *
1328 * @returns Strict VBox status code.
1329 * @param pVCpu The cross context virtual CPU structure of the
1330 * calling thread.
1331 * @param fBypassHandlers Whether to bypass access handlers.
1332 */
1333IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1334{
1335#ifdef IEM_VERIFICATION_MODE_FULL
1336 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1337#endif
1338 iemInitDecoder(pVCpu, fBypassHandlers);
1339
1340#ifdef IEM_WITH_CODE_TLB
1341 /** @todo Do ITLB lookup here. */
1342
1343#else /* !IEM_WITH_CODE_TLB */
1344
1345 /*
1346 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1347 *
1348 * First translate CS:rIP to a physical address.
1349 */
1350 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1351 uint32_t cbToTryRead;
1352 RTGCPTR GCPtrPC;
1353 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1354 {
1355 cbToTryRead = PAGE_SIZE;
1356 GCPtrPC = pCtx->rip;
1357 if (IEM_IS_CANONICAL(GCPtrPC))
1358 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1359 else
1360 return iemRaiseGeneralProtectionFault0(pVCpu);
1361 }
1362 else
1363 {
1364 uint32_t GCPtrPC32 = pCtx->eip;
1365 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1366 if (GCPtrPC32 <= pCtx->cs.u32Limit)
1367 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1368 else
1369 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1370 if (cbToTryRead) { /* likely */ }
1371 else /* overflowed */
1372 {
1373 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1374 cbToTryRead = UINT32_MAX;
1375 }
1376 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1377 Assert(GCPtrPC <= UINT32_MAX);
1378 }
1379
1380# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1381 /* Allow interpretation of patch manager code blocks since they can for
1382 instance throw #PFs for perfectly good reasons. */
1383 if (pVCpu->iem.s.fInPatchCode)
1384 {
1385 size_t cbRead = 0;
1386 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1387 AssertRCReturn(rc, rc);
1388 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1389 return VINF_SUCCESS;
1390 }
1391# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1392
1393 RTGCPHYS GCPhys;
1394 uint64_t fFlags;
1395 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1396 if (RT_SUCCESS(rc)) { /* probable */ }
1397 else
1398 {
1399 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1400 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1401 }
1402 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1403 else
1404 {
1405 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1406 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1407 }
1408 if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1409 else
1410 {
1411 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1412 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1413 }
1414 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1415 /** @todo Check reserved bits and such stuff. PGM is better at doing
1416 * that, so do it when implementing the guest virtual address
1417 * TLB... */
1418
1419# ifdef IEM_VERIFICATION_MODE_FULL
1420 /*
1421 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1422 * instruction.
1423 */
1424 /** @todo optimize this differently by not using PGMPhysRead. */
1425 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1426 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1427 if ( offPrevOpcodes < cbOldOpcodes
1428 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1429 {
1430 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1431 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1432 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1433 pVCpu->iem.s.cbOpcode = cbNew;
1434 return VINF_SUCCESS;
1435 }
1436# endif
1437
1438 /*
1439 * Read the bytes at this address.
1440 */
1441 PVM pVM = pVCpu->CTX_SUFF(pVM);
1442# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1443 size_t cbActual;
1444 if ( PATMIsEnabled(pVM)
1445 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1446 {
1447 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1448 Assert(cbActual > 0);
1449 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1450 }
1451 else
1452# endif
1453 {
1454 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1455 if (cbToTryRead > cbLeftOnPage)
1456 cbToTryRead = cbLeftOnPage;
1457 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1458 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1459
1460 if (!pVCpu->iem.s.fBypassHandlers)
1461 {
1462 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1463 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1464 { /* likely */ }
1465 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1466 {
1467 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1468 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1469 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1470 }
1471 else
1472 {
1473 Log((RT_SUCCESS(rcStrict)
1474 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1475 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1476 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1477 return rcStrict;
1478 }
1479 }
1480 else
1481 {
1482 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1483 if (RT_SUCCESS(rc))
1484 { /* likely */ }
1485 else
1486 {
1487 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1488 GCPtrPC, GCPhys, cbToTryRead, rc));
1489 return rc;
1490 }
1491 }
1492 pVCpu->iem.s.cbOpcode = cbToTryRead;
1493 }
1494#endif /* !IEM_WITH_CODE_TLB */
1495 return VINF_SUCCESS;
1496}
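/*
 * Illustration only -- a minimal sketch (not part of IEM, names made up for
 * the example) of the page-table permission checks performed above when
 * prefetching opcode bytes: a CPL-3 fetch from a supervisor page, or any
 * fetch from a no-execute page while EFER.NXE is set, must raise #PF.
 */
#if 0
static bool iemExampleIsInstrFetchAllowed(uint64_t fPteFlags, uint8_t uCpl, bool fNxEnabled)
{
    if (uCpl == 3 && !(fPteFlags & X86_PTE_US))
        return false;   /* supervisor page, user-mode fetch -> #PF */
    if ((fPteFlags & X86_PTE_PAE_NX) && fNxEnabled)
        return false;   /* NX page with EFER.NXE set -> #PF */
    return true;        /* otherwise the fetch may proceed */
}
#endif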
1497
1498
1499/**
1500 * Invalidates the IEM TLBs.
1501 *
1502 * This is called internally as well as by PGM when moving GC mappings.
1503 *
1505 * @param pVCpu The cross context virtual CPU structure of the calling
1506 * thread.
1507 * @param fVmm Set when PGM calls us with a remapping.
1508 */
1509VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1510{
1511#ifdef IEM_WITH_CODE_TLB
1512 pVCpu->iem.s.cbInstrBufTotal = 0;
1513 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1514 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1515 { /* very likely */ }
1516 else
1517 {
1518 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1519 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1520 while (i-- > 0)
1521 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1522 }
1523#endif
1524
1525#ifdef IEM_WITH_DATA_TLB
1526 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1527 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1528 { /* very likely */ }
1529 else
1530 {
1531 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1532 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1533 while (i-- > 0)
1534 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1535 }
1536#endif
1537 NOREF(pVCpu); NOREF(fVmm);
1538}
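/*
 * Illustration only -- a minimal sketch (plain types, names made up for the
 * example) of the revision based flushing above: each tag is the page number
 * OR'ed with the current revision (IEMTLB_REVISION_INCR presumably sits above
 * the bits used by the page number), so bumping the revision makes every
 * existing tag stale without touching the 256 entries; only on the rare
 * wrap-around to zero do the uTag fields need to be cleared explicitly.
 */
#if 0
static void iemExampleTlbRevisionFlush(void)
{
    RTGCPTR  const GCPtrExample = 0x00401000;                       /* arbitrary guest address */
    uint64_t       uRev         = IEMTLB_REVISION_INCR;
    uint64_t const uTagOld      = (GCPtrExample >> X86_PAGE_SHIFT) | uRev;
    uRev += IEMTLB_REVISION_INCR;   /* "flush": tags built with the old revision no longer compare equal */
    Assert(uTagOld != ((GCPtrExample >> X86_PAGE_SHIFT) | uRev));
    if (!uRev)                      /* wrap-around: only now must all 256 uTag fields be zeroed */
        uRev = IEMTLB_REVISION_INCR;
}
#endif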
1539
1540
1541/**
1542 * Invalidates a page in the TLBs.
1543 *
1544 * @param pVCpu The cross context virtual CPU structure of the calling
1545 * thread.
1546 * @param GCPtr The address of the page to invalidate.
1547 */
1548VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1549{
1550#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1551 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1552 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1553 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1554 uintptr_t idx = (uint8_t)GCPtr;
1555
1556# ifdef IEM_WITH_CODE_TLB
1557 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1558 {
1559 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1560 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1561 pVCpu->iem.s.cbInstrBufTotal = 0;
1562 }
1563# endif
1564
1565# ifdef IEM_WITH_DATA_TLB
1566 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1567 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1568# endif
1569#else
1570 NOREF(pVCpu); NOREF(GCPtr);
1571#endif
1572}
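/*
 * Illustration only -- why the single-page invalidation above only has to
 * look at one slot per TLB: both TLBs are direct mapped with 256 entries
 * (see the AssertCompile above) and indexed by the low 8 bits of the page
 * number, so a given page can only ever occupy one slot.  Names made up for
 * the example.
 */
#if 0
static uintptr_t iemExampleTlbSlotForPage(RTGCPTR GCPtr)
{
    uint64_t const uPageNo = GCPtr >> X86_PAGE_SHIFT;   /* e.g. 0x12345678 -> page number 0x12345 */
    return (uint8_t)uPageNo;                            /* low 8 bits -> slot 0x45 of the 256 entry array */
}
#endif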
1573
1574
1575/**
1576 * Invalidates the host physical aspects of the IEM TLBs.
1577 *
1578 * This is called internally as well as by PGM when moving GC mappings.
1579 *
1580 * @param pVCpu The cross context virtual CPU structure of the calling
1581 * thread.
1582 */
1583VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1584{
1585#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1586 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1587
1588# ifdef IEM_WITH_CODE_TLB
1589 pVCpu->iem.s.cbInstrBufTotal = 0;
1590# endif
1591 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1592 if (uTlbPhysRev != 0)
1593 {
1594 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1595 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1596 }
1597 else
1598 {
1599 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1600 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1601
1602 unsigned i;
1603# ifdef IEM_WITH_CODE_TLB
1604 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1605 while (i-- > 0)
1606 {
1607 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1608 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1609 }
1610# endif
1611# ifdef IEM_WITH_DATA_TLB
1612 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1613 while (i-- > 0)
1614 {
1615 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1616 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1617 }
1618# endif
1619 }
1620#else
1621 NOREF(pVCpu);
1622#endif
1623}
1624
1625
1626/**
1627 * Invalidates the host physical aspects of the IEM TLBs on all virtual CPUs.
1628 *
1629 * This is called internally as well as by PGM when moving GC mappings.
1630 *
1631 * @param pVM The cross context VM structure.
1632 *
1633 * @remarks Caller holds the PGM lock.
1634 */
1635VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1636{
1637 RT_NOREF_PV(pVM);
1638}
1639
1640#ifdef IEM_WITH_CODE_TLB
1641
1642/**
1643 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception
1644 * on failure and jumping.
1645 *
1646 * We end up here for a number of reasons:
1647 * - pbInstrBuf isn't yet initialized.
1648 * - Advancing beyond the buffer boundary (e.g. cross page).
1649 * - Advancing beyond the CS segment limit.
1650 * - Fetching from non-mappable page (e.g. MMIO).
1651 *
1652 * @param pVCpu The cross context virtual CPU structure of the
1653 * calling thread.
1654 * @param cbDst Number of bytes to read.
1655 * @param pvDst Where to return the bytes.
1656 *
1657 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1658 */
1659IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1660{
1661#ifdef IN_RING3
1662//__debugbreak();
1663 for (;;)
1664 {
1665 Assert(cbDst <= 8);
1666 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1667
1668 /*
1669 * We might have a partial buffer match, deal with that first to make the
1670 * rest simpler. This is the first part of the cross page/buffer case.
1671 */
1672 if (pVCpu->iem.s.pbInstrBuf != NULL)
1673 {
1674 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1675 {
1676 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1677 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1678 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1679
1680 cbDst -= cbCopy;
1681 pvDst = (uint8_t *)pvDst + cbCopy;
1682 offBuf += cbCopy;
1683 pVCpu->iem.s.offInstrNextByte += cbCopy;
1684 }
1685 }
1686
1687 /*
1688 * Check segment limit, figuring how much we're allowed to access at this point.
1689 *
1690 * We will fault immediately if RIP is past the segment limit / in non-canonical
1691 * territory. If we do continue, there are one or more bytes to read before we
1692 * end up in trouble and we need to do that first before faulting.
1693 */
1694 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1695 RTGCPTR GCPtrFirst;
1696 uint32_t cbMaxRead;
1697 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1698 {
1699 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1700 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1701 { /* likely */ }
1702 else
1703 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1704 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1705 }
1706 else
1707 {
1708 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1709 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1710 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1711 { /* likely */ }
1712 else
1713 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1714 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1715 if (cbMaxRead != 0)
1716 { /* likely */ }
1717 else
1718 {
1719 /* Overflowed because address is 0 and limit is max. */
1720 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1721 cbMaxRead = X86_PAGE_SIZE;
1722 }
1723 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1724 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1725 if (cbMaxRead2 < cbMaxRead)
1726 cbMaxRead = cbMaxRead2;
1727 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1728 }
1729
1730 /*
1731 * Get the TLB entry for this piece of code.
1732 */
1733 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1734 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1735 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1736 if (pTlbe->uTag == uTag)
1737 {
1738 /* likely when executing lots of code, otherwise unlikely */
1739# ifdef VBOX_WITH_STATISTICS
1740 pVCpu->iem.s.CodeTlb.cTlbHits++;
1741# endif
1742 }
1743 else
1744 {
1745 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1746# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1747 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1748 {
1749 pTlbe->uTag = uTag;
1750 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1751 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1752 pTlbe->GCPhys = NIL_RTGCPHYS;
1753 pTlbe->pbMappingR3 = NULL;
1754 }
1755 else
1756# endif
1757 {
1758 RTGCPHYS GCPhys;
1759 uint64_t fFlags;
1760 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1761 if (RT_FAILURE(rc))
1762 {
1763 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1764 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1765 }
1766
1767 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1768 pTlbe->uTag = uTag;
1769 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1770 pTlbe->GCPhys = GCPhys;
1771 pTlbe->pbMappingR3 = NULL;
1772 }
1773 }
1774
1775 /*
1776 * Check TLB page table level access flags.
1777 */
1778 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1779 {
1780 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1781 {
1782 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1783 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1784 }
1785 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1786 {
1787 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1788 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1789 }
1790 }
1791
1792# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1793 /*
1794 * Allow interpretation of patch manager code blocks since they can for
1795 * instance throw #PFs for perfectly good reasons.
1796 */
1797 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1798 { /* not unlikely */ }
1799 else
1800 {
1801 /** @todo Could optimize this a little in ring-3 if we liked. */
1802 size_t cbRead = 0;
1803 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1804 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1805 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1806 return;
1807 }
1808# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1809
1810 /*
1811 * Look up the physical page info if necessary.
1812 */
1813 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1814 { /* not necessary */ }
1815 else
1816 {
1817 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1818 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1819 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1820 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1821 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1822 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1823 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1824 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1825 }
1826
1827# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1828 /*
1829 * Try do a direct read using the pbMappingR3 pointer.
1830 */
1831 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1832 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1833 {
1834 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1835 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1836 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1837 {
1838 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1839 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1840 }
1841 else
1842 {
1843 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1844 Assert(cbInstr < cbMaxRead);
1845 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1846 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1847 }
1848 if (cbDst <= cbMaxRead)
1849 {
1850 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1851 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1852 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1853 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1854 return;
1855 }
1856 pVCpu->iem.s.pbInstrBuf = NULL;
1857
1858 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1859 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1860 }
1861 else
1862# endif
1863#if 0
1864 /*
1865 * If there is no special read handling, we can read a bit more and
1866 * put it in the prefetch buffer.
1867 */
1868 if ( cbDst < cbMaxRead
1869 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1870 {
1871 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1872 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1873 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1874 { /* likely */ }
1875 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1876 {
1877 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1878 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1879 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1880 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1881 }
1882 else
1883 {
1884 Log((RT_SUCCESS(rcStrict)
1885 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1886 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1887 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1888 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1889 }
1890 }
1891 /*
1892 * Special read handling, so only read exactly what's needed.
1893 * This is a highly unlikely scenario.
1894 */
1895 else
1896#endif
1897 {
1898 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1899 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1900 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1901 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1902 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1903 { /* likely */ }
1904 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1905 {
1906 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1907 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1908 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1909 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1910 }
1911 else
1912 {
1913 Log((RT_SUCCESS(rcStrict)
1914 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1915 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1916 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1917 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1918 }
1919 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1920 if (cbToRead == cbDst)
1921 return;
1922 }
1923
1924 /*
1925 * More to read, loop.
1926 */
1927 cbDst -= cbMaxRead;
1928 pvDst = (uint8_t *)pvDst + cbMaxRead;
1929 }
1930#else
1931 RT_NOREF(pvDst, cbDst);
1932 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1933#endif
1934}
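/*
 * Illustration only -- the overall shape of the cross page / cross buffer
 * loop above, reduced to plain memory copies (names made up for the
 * example): first drain whatever the current buffer still offers, then
 * refill the buffer from the next page and keep looping until the request
 * is satisfied.  The real code additionally checks the segment limit,
 * canonicality and page permissions on every iteration.
 */
# if 0
static void iemExampleFetchAcrossBuffers(uint8_t *pbDst, size_t cbDst,
                                         const uint8_t *pbBuf, size_t cbBuf, size_t offBuf)
{
    for (;;)
    {
        size_t const cbChunk = RT_MIN(cbDst, cbBuf - offBuf);   /* what the current buffer still offers */
        memcpy(pbDst, &pbBuf[offBuf], cbChunk);
        cbDst -= cbChunk;
        if (!cbDst)
            return;                                             /* request satisfied */
        pbDst  += cbChunk;
        offBuf  = 0;                /* ... the real code re-translates and refills pbBuf/cbBuf here ... */
    }
}
# endif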
1935
1936#else
1937
1938/**
1939 * Tries to fetch at least @a cbMin more opcode bytes, raising the
1940 * appropriate exception if it fails.
1941 *
1942 * @returns Strict VBox status code.
1943 * @param pVCpu The cross context virtual CPU structure of the
1944 * calling thread.
1945 * @param cbMin The minimum number of bytes relative to offOpcode
1946 * that must be read.
1947 */
1948IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1949{
1950 /*
1951 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1952 *
1953 * First translate CS:rIP to a physical address.
1954 */
1955 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1956 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1957 uint32_t cbToTryRead;
1958 RTGCPTR GCPtrNext;
1959 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1960 {
1961 cbToTryRead = PAGE_SIZE;
1962 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1963 if (!IEM_IS_CANONICAL(GCPtrNext))
1964 return iemRaiseGeneralProtectionFault0(pVCpu);
1965 }
1966 else
1967 {
1968 uint32_t GCPtrNext32 = pCtx->eip;
1969 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1970 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1971 if (GCPtrNext32 > pCtx->cs.u32Limit)
1972 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1973 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1974 if (!cbToTryRead) /* overflowed */
1975 {
1976 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1977 cbToTryRead = UINT32_MAX;
1978 /** @todo check out wrapping around the code segment. */
1979 }
1980 if (cbToTryRead < cbMin - cbLeft)
1981 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1982 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1983 }
1984
1985 /* Only read up to the end of the page, and make sure we don't read more
1986 than the opcode buffer can hold. */
1987 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1988 if (cbToTryRead > cbLeftOnPage)
1989 cbToTryRead = cbLeftOnPage;
1990 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1991 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1992/** @todo r=bird: Convert assertion into undefined opcode exception? */
1993 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1994
1995# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1996 /* Allow interpretation of patch manager code blocks since they can for
1997 instance throw #PFs for perfectly good reasons. */
1998 if (pVCpu->iem.s.fInPatchCode)
1999 {
2000 size_t cbRead = 0;
2001 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2002 AssertRCReturn(rc, rc);
2003 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2004 return VINF_SUCCESS;
2005 }
2006# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2007
2008 RTGCPHYS GCPhys;
2009 uint64_t fFlags;
2010 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2011 if (RT_FAILURE(rc))
2012 {
2013 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2014 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2015 }
2016 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2017 {
2018 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2019 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2020 }
2021 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
2022 {
2023 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2024 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2025 }
2026 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2027 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2028 /** @todo Check reserved bits and such stuff. PGM is better at doing
2029 * that, so do it when implementing the guest virtual address
2030 * TLB... */
2031
2032 /*
2033 * Read the bytes at this address.
2034 *
2035 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2036 * and since PATM should only patch the start of an instruction there
2037 * should be no need to check again here.
2038 */
2039 if (!pVCpu->iem.s.fBypassHandlers)
2040 {
2041 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2042 cbToTryRead, PGMACCESSORIGIN_IEM);
2043 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2044 { /* likely */ }
2045 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2046 {
2047 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2048 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2049 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2050 }
2051 else
2052 {
2053 Log((RT_SUCCESS(rcStrict)
2054 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2055 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2056 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2057 return rcStrict;
2058 }
2059 }
2060 else
2061 {
2062 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2063 if (RT_SUCCESS(rc))
2064 { /* likely */ }
2065 else
2066 {
2067 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2068 return rc;
2069 }
2070 }
2071 pVCpu->iem.s.cbOpcode += cbToTryRead;
2072 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2073
2074 return VINF_SUCCESS;
2075}
2076
2077#endif /* !IEM_WITH_CODE_TLB */
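/*
 * Illustration only -- the segment limit arithmetic used by the fetchers
 * above, including the expand-up wrap case (plain types, names made up for
 * the example): with eip = 0 and a 4 GiB limit, "limit - eip + 1" overflows
 * to zero, which is treated as "the whole 32-bit space is in range".
 */
#if 0
static uint32_t iemExampleCalcCsBytesLeft(uint32_t uEip, uint32_t uLimit)
{
    uint32_t cbToTry = uLimit - uEip + 1;   /* bytes from eip up to and including the limit */
    if (!cbToTry)                           /* uEip == 0 && uLimit == UINT32_MAX overflowed */
        cbToTry = UINT32_MAX;
    return cbToTry;
}
#endif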
2078#ifndef IEM_WITH_SETJMP
2079
2080/**
2081 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2082 *
2083 * @returns Strict VBox status code.
2084 * @param pVCpu The cross context virtual CPU structure of the
2085 * calling thread.
2086 * @param pb Where to return the opcode byte.
2087 */
2088DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2089{
2090 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2091 if (rcStrict == VINF_SUCCESS)
2092 {
2093 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2094 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2095 pVCpu->iem.s.offOpcode = offOpcode + 1;
2096 }
2097 else
2098 *pb = 0;
2099 return rcStrict;
2100}
2101
2102
2103/**
2104 * Fetches the next opcode byte.
2105 *
2106 * @returns Strict VBox status code.
2107 * @param pVCpu The cross context virtual CPU structure of the
2108 * calling thread.
2109 * @param pu8 Where to return the opcode byte.
2110 */
2111DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2112{
2113 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2114 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2115 {
2116 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2117 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2118 return VINF_SUCCESS;
2119 }
2120 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2121}
2122
2123#else /* IEM_WITH_SETJMP */
2124
2125/**
2126 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2127 *
2128 * @returns The opcode byte.
2129 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2130 */
2131DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2132{
2133# ifdef IEM_WITH_CODE_TLB
2134 uint8_t u8;
2135 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2136 return u8;
2137# else
2138 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2139 if (rcStrict == VINF_SUCCESS)
2140 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2141 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2142# endif
2143}
2144
2145
2146/**
2147 * Fetches the next opcode byte, longjmp on error.
2148 *
2149 * @returns The opcode byte.
2150 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2151 */
2152DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2153{
2154# ifdef IEM_WITH_CODE_TLB
2155 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2156 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2157 if (RT_LIKELY( pbBuf != NULL
2158 && offBuf < pVCpu->iem.s.cbInstrBuf))
2159 {
2160 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2161 return pbBuf[offBuf];
2162 }
2163# else
2164 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2165 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2166 {
2167 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2168 return pVCpu->iem.s.abOpcode[offOpcode];
2169 }
2170# endif
2171 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2172}
2173
2174#endif /* IEM_WITH_SETJMP */
2175
2176/**
2177 * Fetches the next opcode byte, returns automatically on failure.
2178 *
2179 * @param a_pu8 Where to return the opcode byte.
2180 * @remark Implicitly references pVCpu.
2181 */
2182#ifndef IEM_WITH_SETJMP
2183# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2184 do \
2185 { \
2186 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2187 if (rcStrict2 == VINF_SUCCESS) \
2188 { /* likely */ } \
2189 else \
2190 return rcStrict2; \
2191 } while (0)
2192#else
2193# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2194#endif /* IEM_WITH_SETJMP */
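/*
 * Illustration only -- a hypothetical decoder snippet (not part of IEM,
 * names made up for the example) showing how IEM_OPCODE_GET_NEXT_U8 is meant
 * to be used: in the non-setjmp build it returns the strict status from the
 * calling function on failure, so it may only appear in functions returning
 * VBOXSTRICTRC; in the setjmp build it longjmps instead.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemExampleDecodeOpcodeAndModRm(PVMCPU pVCpu)
{
    uint8_t bOpcode, bRm;
    IEM_OPCODE_GET_NEXT_U8(&bOpcode);   /* may return / longjmp on fetch failure */
    IEM_OPCODE_GET_NEXT_U8(&bRm);
    Log4(("example: opcode=%#x modrm=%#x\n", bOpcode, bRm));
    return VINF_SUCCESS;
}
#endif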
2195
2196
2197#ifndef IEM_WITH_SETJMP
2198/**
2199 * Fetches the next signed byte from the opcode stream.
2200 *
2201 * @returns Strict VBox status code.
2202 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2203 * @param pi8 Where to return the signed byte.
2204 */
2205DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2206{
2207 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2208}
2209#endif /* !IEM_WITH_SETJMP */
2210
2211
2212/**
2213 * Fetches the next signed byte from the opcode stream, returning automatically
2214 * on failure.
2215 *
2216 * @param a_pi8 Where to return the signed byte.
2217 * @remark Implicitly references pVCpu.
2218 */
2219#ifndef IEM_WITH_SETJMP
2220# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2221 do \
2222 { \
2223 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2224 if (rcStrict2 != VINF_SUCCESS) \
2225 return rcStrict2; \
2226 } while (0)
2227#else /* IEM_WITH_SETJMP */
2228# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2229
2230#endif /* IEM_WITH_SETJMP */
2231
2232#ifndef IEM_WITH_SETJMP
2233
2234/**
2235 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2236 *
2237 * @returns Strict VBox status code.
2238 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2239 * @param pu16 Where to return the opcode word.
2240 */
2241DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2242{
2243 uint8_t u8;
2244 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2245 if (rcStrict == VINF_SUCCESS)
2246 *pu16 = (int8_t)u8;
2247 return rcStrict;
2248}
2249
2250
2251/**
2252 * Fetches the next signed byte from the opcode stream, extending it to
2253 * unsigned 16-bit.
2254 *
2255 * @returns Strict VBox status code.
2256 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2257 * @param pu16 Where to return the unsigned word.
2258 */
2259DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2260{
2261 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2262 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2263 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2264
2265 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2266 pVCpu->iem.s.offOpcode = offOpcode + 1;
2267 return VINF_SUCCESS;
2268}
2269
2270#endif /* !IEM_WITH_SETJMP */
2271
2272/**
2273 * Fetches the next signed byte from the opcode stream, sign-extending it to
2274 * a word, returning automatically on failure.
2275 *
2276 * @param a_pu16 Where to return the word.
2277 * @remark Implicitly references pVCpu.
2278 */
2279#ifndef IEM_WITH_SETJMP
2280# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2281 do \
2282 { \
2283 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2284 if (rcStrict2 != VINF_SUCCESS) \
2285 return rcStrict2; \
2286 } while (0)
2287#else
2288# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2289#endif
2290
2291#ifndef IEM_WITH_SETJMP
2292
2293/**
2294 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2295 *
2296 * @returns Strict VBox status code.
2297 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2298 * @param pu32 Where to return the opcode dword.
2299 */
2300DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2301{
2302 uint8_t u8;
2303 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2304 if (rcStrict == VINF_SUCCESS)
2305 *pu32 = (int8_t)u8;
2306 return rcStrict;
2307}
2308
2309
2310/**
2311 * Fetches the next signed byte from the opcode stream, extending it to
2312 * unsigned 32-bit.
2313 *
2314 * @returns Strict VBox status code.
2315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2316 * @param pu32 Where to return the unsigned dword.
2317 */
2318DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2319{
2320 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2321 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2322 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2323
2324 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2325 pVCpu->iem.s.offOpcode = offOpcode + 1;
2326 return VINF_SUCCESS;
2327}
2328
2329#endif /* !IEM_WITH_SETJMP */
2330
2331/**
2332 * Fetches the next signed byte from the opcode stream, sign-extending it to
2333 * a double word, returning automatically on failure.
2334 *
2335 * @param a_pu32 Where to return the double word.
2336 * @remark Implicitly references pVCpu.
2337 */
2338#ifndef IEM_WITH_SETJMP
2339#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2340 do \
2341 { \
2342 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2343 if (rcStrict2 != VINF_SUCCESS) \
2344 return rcStrict2; \
2345 } while (0)
2346#else
2347# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2348#endif
2349
2350#ifndef IEM_WITH_SETJMP
2351
2352/**
2353 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2354 *
2355 * @returns Strict VBox status code.
2356 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2357 * @param pu64 Where to return the opcode qword.
2358 */
2359DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2360{
2361 uint8_t u8;
2362 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2363 if (rcStrict == VINF_SUCCESS)
2364 *pu64 = (int8_t)u8;
2365 return rcStrict;
2366}
2367
2368
2369/**
2370 * Fetches the next signed byte from the opcode stream, extending it to
2371 * unsigned 64-bit.
2372 *
2373 * @returns Strict VBox status code.
2374 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2375 * @param pu64 Where to return the unsigned qword.
2376 */
2377DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2378{
2379 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2380 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2381 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2382
2383 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2384 pVCpu->iem.s.offOpcode = offOpcode + 1;
2385 return VINF_SUCCESS;
2386}
2387
2388#endif /* !IEM_WITH_SETJMP */
2389
2390
2391/**
2392 * Fetches the next signed byte from the opcode stream, sign-extending it to
2393 * a quad word, returning automatically on failure.
2394 *
2395 * @param a_pu64 Where to return the quad word.
2396 * @remark Implicitly references pVCpu.
2397 */
2398#ifndef IEM_WITH_SETJMP
2399# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2400 do \
2401 { \
2402 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2403 if (rcStrict2 != VINF_SUCCESS) \
2404 return rcStrict2; \
2405 } while (0)
2406#else
2407# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2408#endif
2409
2410
2411#ifndef IEM_WITH_SETJMP
2412
2413/**
2414 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2415 *
2416 * @returns Strict VBox status code.
2417 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2418 * @param pu16 Where to return the opcode word.
2419 */
2420DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2421{
2422 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2423 if (rcStrict == VINF_SUCCESS)
2424 {
2425 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2426# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2427 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2428# else
2429 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2430# endif
2431 pVCpu->iem.s.offOpcode = offOpcode + 2;
2432 }
2433 else
2434 *pu16 = 0;
2435 return rcStrict;
2436}
2437
2438
2439/**
2440 * Fetches the next opcode word.
2441 *
2442 * @returns Strict VBox status code.
2443 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2444 * @param pu16 Where to return the opcode word.
2445 */
2446DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2447{
2448 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2449 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2450 {
2451 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2452# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2453 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2454# else
2455 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2456# endif
2457 return VINF_SUCCESS;
2458 }
2459 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2460}
2461
2462#else /* IEM_WITH_SETJMP */
2463
2464/**
2465 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2466 *
2467 * @returns The opcode word.
2468 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2469 */
2470DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2471{
2472# ifdef IEM_WITH_CODE_TLB
2473 uint16_t u16;
2474 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2475 return u16;
2476# else
2477 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2478 if (rcStrict == VINF_SUCCESS)
2479 {
2480 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2481 pVCpu->iem.s.offOpcode += 2;
2482# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2483 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2484# else
2485 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2486# endif
2487 }
2488 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2489# endif
2490}
2491
2492
2493/**
2494 * Fetches the next opcode word, longjmp on error.
2495 *
2496 * @returns The opcode word.
2497 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2498 */
2499DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2500{
2501# ifdef IEM_WITH_CODE_TLB
2502 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2503 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2504 if (RT_LIKELY( pbBuf != NULL
2505 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2506 {
2507 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2508# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2509 return *(uint16_t const *)&pbBuf[offBuf];
2510# else
2511 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2512# endif
2513 }
2514# else
2515 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2516 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2517 {
2518 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2519# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2520 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2521# else
2522 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2523# endif
2524 }
2525# endif
2526 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2527}
2528
2529#endif /* IEM_WITH_SETJMP */
2530
2531
2532/**
2533 * Fetches the next opcode word, returns automatically on failure.
2534 *
2535 * @param a_pu16 Where to return the opcode word.
2536 * @remark Implicitly references pVCpu.
2537 */
2538#ifndef IEM_WITH_SETJMP
2539# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2540 do \
2541 { \
2542 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2543 if (rcStrict2 != VINF_SUCCESS) \
2544 return rcStrict2; \
2545 } while (0)
2546#else
2547# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2548#endif
2549
2550#ifndef IEM_WITH_SETJMP
2551
2552/**
2553 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2554 *
2555 * @returns Strict VBox status code.
2556 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2557 * @param pu32 Where to return the opcode double word.
2558 */
2559DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2560{
2561 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2562 if (rcStrict == VINF_SUCCESS)
2563 {
2564 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2565 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2566 pVCpu->iem.s.offOpcode = offOpcode + 2;
2567 }
2568 else
2569 *pu32 = 0;
2570 return rcStrict;
2571}
2572
2573
2574/**
2575 * Fetches the next opcode word, zero extending it to a double word.
2576 *
2577 * @returns Strict VBox status code.
2578 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2579 * @param pu32 Where to return the opcode double word.
2580 */
2581DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2582{
2583 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2584 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2585 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2586
2587 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2588 pVCpu->iem.s.offOpcode = offOpcode + 2;
2589 return VINF_SUCCESS;
2590}
2591
2592#endif /* !IEM_WITH_SETJMP */
2593
2594
2595/**
2596 * Fetches the next opcode word and zero extends it to a double word, returns
2597 * automatically on failure.
2598 *
2599 * @param a_pu32 Where to return the opcode double word.
2600 * @remark Implicitly references pVCpu.
2601 */
2602#ifndef IEM_WITH_SETJMP
2603# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2604 do \
2605 { \
2606 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2607 if (rcStrict2 != VINF_SUCCESS) \
2608 return rcStrict2; \
2609 } while (0)
2610#else
2611# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2612#endif
2613
2614#ifndef IEM_WITH_SETJMP
2615
2616/**
2617 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2618 *
2619 * @returns Strict VBox status code.
2620 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2621 * @param pu64 Where to return the opcode quad word.
2622 */
2623DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2624{
2625 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2626 if (rcStrict == VINF_SUCCESS)
2627 {
2628 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2629 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2630 pVCpu->iem.s.offOpcode = offOpcode + 2;
2631 }
2632 else
2633 *pu64 = 0;
2634 return rcStrict;
2635}
2636
2637
2638/**
2639 * Fetches the next opcode word, zero extending it to a quad word.
2640 *
2641 * @returns Strict VBox status code.
2642 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2643 * @param pu64 Where to return the opcode quad word.
2644 */
2645DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2646{
2647 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2648 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2649 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2650
2651 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2652 pVCpu->iem.s.offOpcode = offOpcode + 2;
2653 return VINF_SUCCESS;
2654}
2655
2656#endif /* !IEM_WITH_SETJMP */
2657
2658/**
2659 * Fetches the next opcode word and zero extends it to a quad word, returns
2660 * automatically on failure.
2661 *
2662 * @param a_pu64 Where to return the opcode quad word.
2663 * @remark Implicitly references pVCpu.
2664 */
2665#ifndef IEM_WITH_SETJMP
2666# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2667 do \
2668 { \
2669 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2670 if (rcStrict2 != VINF_SUCCESS) \
2671 return rcStrict2; \
2672 } while (0)
2673#else
2674# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2675#endif
2676
2677
2678#ifndef IEM_WITH_SETJMP
2679/**
2680 * Fetches the next signed word from the opcode stream.
2681 *
2682 * @returns Strict VBox status code.
2683 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2684 * @param pi16 Where to return the signed word.
2685 */
2686DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2687{
2688 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2689}
2690#endif /* !IEM_WITH_SETJMP */
2691
2692
2693/**
2694 * Fetches the next signed word from the opcode stream, returning automatically
2695 * on failure.
2696 *
2697 * @param a_pi16 Where to return the signed word.
2698 * @remark Implicitly references pVCpu.
2699 */
2700#ifndef IEM_WITH_SETJMP
2701# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2702 do \
2703 { \
2704 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2705 if (rcStrict2 != VINF_SUCCESS) \
2706 return rcStrict2; \
2707 } while (0)
2708#else
2709# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2710#endif
2711
2712#ifndef IEM_WITH_SETJMP
2713
2714/**
2715 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2716 *
2717 * @returns Strict VBox status code.
2718 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2719 * @param pu32 Where to return the opcode dword.
2720 */
2721DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2722{
2723 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2724 if (rcStrict == VINF_SUCCESS)
2725 {
2726 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2727# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2728 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2729# else
2730 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2731 pVCpu->iem.s.abOpcode[offOpcode + 1],
2732 pVCpu->iem.s.abOpcode[offOpcode + 2],
2733 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2734# endif
2735 pVCpu->iem.s.offOpcode = offOpcode + 4;
2736 }
2737 else
2738 *pu32 = 0;
2739 return rcStrict;
2740}
2741
2742
2743/**
2744 * Fetches the next opcode dword.
2745 *
2746 * @returns Strict VBox status code.
2747 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2748 * @param pu32 Where to return the opcode double word.
2749 */
2750DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2751{
2752 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2753 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2754 {
2755 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2756# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2757 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2758# else
2759 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2760 pVCpu->iem.s.abOpcode[offOpcode + 1],
2761 pVCpu->iem.s.abOpcode[offOpcode + 2],
2762 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2763# endif
2764 return VINF_SUCCESS;
2765 }
2766 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2767}
2768
2769#else /* IEM_WITH_SETJMP */
2770
2771/**
2772 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2773 *
2774 * @returns The opcode dword.
2775 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2776 */
2777DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2778{
2779# ifdef IEM_WITH_CODE_TLB
2780 uint32_t u32;
2781 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2782 return u32;
2783# else
2784 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2785 if (rcStrict == VINF_SUCCESS)
2786 {
2787 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2788 pVCpu->iem.s.offOpcode = offOpcode + 4;
2789# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2790 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2791# else
2792 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2793 pVCpu->iem.s.abOpcode[offOpcode + 1],
2794 pVCpu->iem.s.abOpcode[offOpcode + 2],
2795 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2796# endif
2797 }
2798 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2799# endif
2800}
2801
2802
2803/**
2804 * Fetches the next opcode dword, longjmp on error.
2805 *
2806 * @returns The opcode dword.
2807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2808 */
2809DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2810{
2811# ifdef IEM_WITH_CODE_TLB
2812 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2813 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2814 if (RT_LIKELY( pbBuf != NULL
2815 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2816 {
2817 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2818# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2819 return *(uint32_t const *)&pbBuf[offBuf];
2820# else
2821 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2822 pbBuf[offBuf + 1],
2823 pbBuf[offBuf + 2],
2824 pbBuf[offBuf + 3]);
2825# endif
2826 }
2827# else
2828 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2829 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2830 {
2831 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2832# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2833 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2834# else
2835 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2836 pVCpu->iem.s.abOpcode[offOpcode + 1],
2837 pVCpu->iem.s.abOpcode[offOpcode + 2],
2838 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2839# endif
2840 }
2841# endif
2842 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2843}
2844
2845#endif /* IEM_WITH_SETJMP */
2846
2847
2848/**
2849 * Fetches the next opcode dword, returns automatically on failure.
2850 *
2851 * @param a_pu32 Where to return the opcode dword.
2852 * @remark Implicitly references pVCpu.
2853 */
2854#ifndef IEM_WITH_SETJMP
2855# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2856 do \
2857 { \
2858 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2859 if (rcStrict2 != VINF_SUCCESS) \
2860 return rcStrict2; \
2861 } while (0)
2862#else
2863# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2864#endif
2865
2866#ifndef IEM_WITH_SETJMP
2867
2868/**
2869 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2870 *
2871 * @returns Strict VBox status code.
2872 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2873 * @param pu64 Where to return the opcode quad word.
2874 */
2875DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2876{
2877 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2878 if (rcStrict == VINF_SUCCESS)
2879 {
2880 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2881 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2882 pVCpu->iem.s.abOpcode[offOpcode + 1],
2883 pVCpu->iem.s.abOpcode[offOpcode + 2],
2884 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2885 pVCpu->iem.s.offOpcode = offOpcode + 4;
2886 }
2887 else
2888 *pu64 = 0;
2889 return rcStrict;
2890}
2891
2892
2893/**
2894 * Fetches the next opcode dword, zero extending it to a quad word.
2895 *
2896 * @returns Strict VBox status code.
2897 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2898 * @param pu64 Where to return the opcode quad word.
2899 */
2900DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2901{
2902 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2903 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2904 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2905
2906 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2907 pVCpu->iem.s.abOpcode[offOpcode + 1],
2908 pVCpu->iem.s.abOpcode[offOpcode + 2],
2909 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2910 pVCpu->iem.s.offOpcode = offOpcode + 4;
2911 return VINF_SUCCESS;
2912}
2913
2914#endif /* !IEM_WITH_SETJMP */
2915
2916
2917/**
2918 * Fetches the next opcode dword and zero extends it to a quad word, returns
2919 * automatically on failure.
2920 *
2921 * @param a_pu64 Where to return the opcode quad word.
2922 * @remark Implicitly references pVCpu.
2923 */
2924#ifndef IEM_WITH_SETJMP
2925# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2926 do \
2927 { \
2928 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2929 if (rcStrict2 != VINF_SUCCESS) \
2930 return rcStrict2; \
2931 } while (0)
2932#else
2933# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2934#endif
2935
2936
2937#ifndef IEM_WITH_SETJMP
2938/**
2939 * Fetches the next signed double word from the opcode stream.
2940 *
2941 * @returns Strict VBox status code.
2942 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2943 * @param pi32 Where to return the signed double word.
2944 */
2945DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2946{
2947 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2948}
2949#endif
2950
2951/**
2952 * Fetches the next signed double word from the opcode stream, returning
2953 * automatically on failure.
2954 *
2955 * @param a_pi32 Where to return the signed double word.
2956 * @remark Implicitly references pVCpu.
2957 */
2958#ifndef IEM_WITH_SETJMP
2959# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2960 do \
2961 { \
2962 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2963 if (rcStrict2 != VINF_SUCCESS) \
2964 return rcStrict2; \
2965 } while (0)
2966#else
2967# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2968#endif
2969
2970#ifndef IEM_WITH_SETJMP
2971
2972/**
2973 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2974 *
2975 * @returns Strict VBox status code.
2976 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2977 * @param pu64 Where to return the opcode qword.
2978 */
2979DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2980{
2981 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2982 if (rcStrict == VINF_SUCCESS)
2983 {
2984 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2985 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2986 pVCpu->iem.s.abOpcode[offOpcode + 1],
2987 pVCpu->iem.s.abOpcode[offOpcode + 2],
2988 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2989 pVCpu->iem.s.offOpcode = offOpcode + 4;
2990 }
2991 else
2992 *pu64 = 0;
2993 return rcStrict;
2994}
2995
2996
2997/**
2998 * Fetches the next opcode dword, sign extending it into a quad word.
2999 *
3000 * @returns Strict VBox status code.
3001 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3002 * @param pu64 Where to return the opcode quad word.
3003 */
3004DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3005{
3006 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3007 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3008 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3009
3010 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3011 pVCpu->iem.s.abOpcode[offOpcode + 1],
3012 pVCpu->iem.s.abOpcode[offOpcode + 2],
3013 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3014 *pu64 = i32;
3015 pVCpu->iem.s.offOpcode = offOpcode + 4;
3016 return VINF_SUCCESS;
3017}
3018
3019#endif /* !IEM_WITH_SETJMP */
3020
3021
3022/**
3023 * Fetches the next opcode double word and sign extends it to a quad word,
3024 * returns automatically on failure.
3025 *
3026 * @param a_pu64 Where to return the opcode quad word.
3027 * @remark Implicitly references pVCpu.
3028 */
3029#ifndef IEM_WITH_SETJMP
3030# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3031 do \
3032 { \
3033 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3034 if (rcStrict2 != VINF_SUCCESS) \
3035 return rcStrict2; \
3036 } while (0)
3037#else
3038# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3039#endif
3040
3041#ifndef IEM_WITH_SETJMP
3042
3043/**
3044 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3045 *
3046 * @returns Strict VBox status code.
3047 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3048 * @param pu64 Where to return the opcode qword.
3049 */
3050DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3051{
3052 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3053 if (rcStrict == VINF_SUCCESS)
3054 {
3055 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3056# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3057 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3058# else
3059 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3060 pVCpu->iem.s.abOpcode[offOpcode + 1],
3061 pVCpu->iem.s.abOpcode[offOpcode + 2],
3062 pVCpu->iem.s.abOpcode[offOpcode + 3],
3063 pVCpu->iem.s.abOpcode[offOpcode + 4],
3064 pVCpu->iem.s.abOpcode[offOpcode + 5],
3065 pVCpu->iem.s.abOpcode[offOpcode + 6],
3066 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3067# endif
3068 pVCpu->iem.s.offOpcode = offOpcode + 8;
3069 }
3070 else
3071 *pu64 = 0;
3072 return rcStrict;
3073}
3074
3075
3076/**
3077 * Fetches the next opcode qword.
3078 *
3079 * @returns Strict VBox status code.
3080 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3081 * @param pu64 Where to return the opcode qword.
3082 */
3083DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3084{
3085 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3086 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3087 {
3088# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3089 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3090# else
3091 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3092 pVCpu->iem.s.abOpcode[offOpcode + 1],
3093 pVCpu->iem.s.abOpcode[offOpcode + 2],
3094 pVCpu->iem.s.abOpcode[offOpcode + 3],
3095 pVCpu->iem.s.abOpcode[offOpcode + 4],
3096 pVCpu->iem.s.abOpcode[offOpcode + 5],
3097 pVCpu->iem.s.abOpcode[offOpcode + 6],
3098 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3099# endif
3100 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3101 return VINF_SUCCESS;
3102 }
3103 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3104}
3105
3106#else /* IEM_WITH_SETJMP */
3107
3108/**
3109 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3110 *
3111 * @returns The opcode qword.
3112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3113 */
3114DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3115{
3116# ifdef IEM_WITH_CODE_TLB
3117 uint64_t u64;
3118 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3119 return u64;
3120# else
3121 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3122 if (rcStrict == VINF_SUCCESS)
3123 {
3124 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3125 pVCpu->iem.s.offOpcode = offOpcode + 8;
3126# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3127 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3128# else
3129 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3130 pVCpu->iem.s.abOpcode[offOpcode + 1],
3131 pVCpu->iem.s.abOpcode[offOpcode + 2],
3132 pVCpu->iem.s.abOpcode[offOpcode + 3],
3133 pVCpu->iem.s.abOpcode[offOpcode + 4],
3134 pVCpu->iem.s.abOpcode[offOpcode + 5],
3135 pVCpu->iem.s.abOpcode[offOpcode + 6],
3136 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3137# endif
3138 }
3139 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3140# endif
3141}
3142
3143
3144/**
3145 * Fetches the next opcode qword, longjmp on error.
3146 *
3147 * @returns The opcode qword.
3148 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3149 */
3150DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3151{
3152# ifdef IEM_WITH_CODE_TLB
3153 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3154 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3155 if (RT_LIKELY( pbBuf != NULL
3156 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3157 {
3158 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3159# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3160 return *(uint64_t const *)&pbBuf[offBuf];
3161# else
3162 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3163 pbBuf[offBuf + 1],
3164 pbBuf[offBuf + 2],
3165 pbBuf[offBuf + 3],
3166 pbBuf[offBuf + 4],
3167 pbBuf[offBuf + 5],
3168 pbBuf[offBuf + 6],
3169 pbBuf[offBuf + 7]);
3170# endif
3171 }
3172# else
3173 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3174 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3175 {
3176 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3177# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3178 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3179# else
3180 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3181 pVCpu->iem.s.abOpcode[offOpcode + 1],
3182 pVCpu->iem.s.abOpcode[offOpcode + 2],
3183 pVCpu->iem.s.abOpcode[offOpcode + 3],
3184 pVCpu->iem.s.abOpcode[offOpcode + 4],
3185 pVCpu->iem.s.abOpcode[offOpcode + 5],
3186 pVCpu->iem.s.abOpcode[offOpcode + 6],
3187 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3188# endif
3189 }
3190# endif
3191 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3192}
3193
3194#endif /* IEM_WITH_SETJMP */
3195
3196/**
3197 * Fetches the next opcode quad word, returns automatically on failure.
3198 *
3199 * @param a_pu64 Where to return the opcode quad word.
3200 * @remark Implicitly references pVCpu.
3201 */
3202#ifndef IEM_WITH_SETJMP
3203# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3204 do \
3205 { \
3206 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3207 if (rcStrict2 != VINF_SUCCESS) \
3208 return rcStrict2; \
3209 } while (0)
3210#else
3211# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3212#endif
3213
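/*
 * Illustrative sketch (not part of this file, not built): the typical caller
 * pattern for the qword fetch macro above in the non-setjmp configuration.
 * The helper name iemExampleFetchImm64 is made up for illustration only.
 */
#if 0
static VBOXSTRICTRC iemExampleFetchImm64(PVMCPU pVCpu, uint64_t *pu64Imm)
{
    /* On a fetch failure the macro expands to an early 'return rcStrict2', so
       the caller simply propagates the strict status code to its own caller. */
    IEM_OPCODE_GET_NEXT_U64(pu64Imm);
    return VINF_SUCCESS;
}
#endif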
3214
3215/** @name Misc Worker Functions.
3216 * @{
3217 */
3218
3219/**
3220 * Gets the exception class for the specified exception vector.
3221 *
3222 * @returns The class of the specified exception.
3223 * @param uVector The exception vector.
3224 */
3225IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3226{
3227 Assert(uVector <= X86_XCPT_LAST);
3228 switch (uVector)
3229 {
3230 case X86_XCPT_DE:
3231 case X86_XCPT_TS:
3232 case X86_XCPT_NP:
3233 case X86_XCPT_SS:
3234 case X86_XCPT_GP:
3235 case X86_XCPT_SX: /* AMD only */
3236 return IEMXCPTCLASS_CONTRIBUTORY;
3237
3238 case X86_XCPT_PF:
3239 case X86_XCPT_VE: /* Intel only */
3240 return IEMXCPTCLASS_PAGE_FAULT;
3241 }
3242 return IEMXCPTCLASS_BENIGN;
3243}
3244
3245
3246/**
3247 * Evaluates how to handle an exception caused during delivery of another event
3248 * (exception / interrupt).
3249 *
3250 * @returns How to handle the recursive exception.
3251 * @param pVCpu The cross context virtual CPU structure of the
3252 * calling thread.
3253 * @param fPrevFlags The flags of the previous event.
3254 * @param uPrevVector The vector of the previous event.
3255 * @param fCurFlags The flags of the current exception.
3256 * @param uCurVector The vector of the current exception.
3257 * @param pfXcptRaiseInfo Where to store additional information about the
3258 * exception condition. Optional.
3259 */
3260VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3261 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3262{
3263 /*
3264 * Only CPU exceptions can be raised while delivering other events; software interrupt
3265 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3266 */
3267 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3268 Assert(pVCpu); RT_NOREF(pVCpu);
3269
3270 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3271 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3272 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3273 {
3274 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3275 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3276 {
3277 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3278 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3279 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3280 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3281 {
3282 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3283 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3284 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3285 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3286 uCurVector, IEM_GET_CTX(pVCpu)->cr2));
3287 }
3288 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3289 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3290 {
3291 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3292 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%u uCurVector=%u -> #DF\n", uPrevVector, uCurVector));
3293 }
3294 else if ( uPrevVector == X86_XCPT_DF
3295 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3296 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3297 {
3298 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3299 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3300 }
3301 }
3302 else
3303 {
3304 if (uPrevVector == X86_XCPT_NMI)
3305 {
3306 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3307 if (uCurVector == X86_XCPT_PF)
3308 {
3309 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3310 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3311 }
3312 }
3313 else if ( uPrevVector == X86_XCPT_AC
3314 && uCurVector == X86_XCPT_AC)
3315 {
3316 enmRaise = IEMXCPTRAISE_CPU_HANG;
3317 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3318 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3319 }
3320 }
3321 }
3322 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3323 {
3324 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3325 if (uCurVector == X86_XCPT_PF)
3326 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3327 }
3328 else
3329 {
3330 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3331 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3332 }
3333
3334 if (pfXcptRaiseInfo)
3335 *pfXcptRaiseInfo = fRaiseInfo;
3336 return enmRaise;
3337}
3338
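/*
 * Illustrative sketch (not part of this file, not built): querying the logic
 * above for a #GP raised while delivering a #PF, which resolves to a double
 * fault with IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT noted in the info flags.
 */
#if 0
    IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
    IEMXCPTRAISE const enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
                                                           IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF, /* previous: #PF being delivered */
                                                           IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP, /* current:  #GP hit during delivery */
                                                           &fRaiseInfo);
    Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT);
#endif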
3339
3340/**
3341 * Enters the CPU shutdown state initiated by a triple fault or other
3342 * unrecoverable conditions.
3343 *
3344 * @returns Strict VBox status code.
3345 * @param pVCpu The cross context virtual CPU structure of the
3346 * calling thread.
3347 */
3348IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3349{
3350 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3351 {
3352 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3353 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3354 }
3355
3356 RT_NOREF(pVCpu);
3357 return VINF_EM_TRIPLE_FAULT;
3358}
3359
3360
3361/**
3362 * Validates a new SS segment.
3363 *
3364 * @returns VBox strict status code.
3365 * @param pVCpu The cross context virtual CPU structure of the
3366 * calling thread.
3367 * @param pCtx The CPU context.
3368 * @param NewSS The new SS selector.
3369 * @param uCpl The CPL to load the stack for.
3370 * @param pDesc Where to return the descriptor.
3371 */
3372IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3373{
3374 NOREF(pCtx);
3375
3376 /* Null selectors are not allowed (we're not called for dispatching
3377 interrupts with SS=0 in long mode). */
3378 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3379 {
3380 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3381 return iemRaiseTaskSwitchFault0(pVCpu);
3382 }
3383
3384 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3385 if ((NewSS & X86_SEL_RPL) != uCpl)
3386 {
3387 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3388 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3389 }
3390
3391 /*
3392 * Read the descriptor.
3393 */
3394 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3395 if (rcStrict != VINF_SUCCESS)
3396 return rcStrict;
3397
3398 /*
3399 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3400 */
3401 if (!pDesc->Legacy.Gen.u1DescType)
3402 {
3403 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3404 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3405 }
3406
3407 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3408 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3409 {
3410 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3411 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3412 }
3413 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3414 {
3415 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3416 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3417 }
3418
3419 /* Is it there? */
3420 /** @todo testcase: Is this checked before the canonical / limit check below? */
3421 if (!pDesc->Legacy.Gen.u1Present)
3422 {
3423 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3424 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3425 }
3426
3427 return VINF_SUCCESS;
3428}
3429
3430
3431/**
3432 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3433 * not.
3434 *
3435 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3436 * @param a_pCtx The CPU context.
3437 */
3438#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3439# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3440 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3441 ? (a_pCtx)->eflags.u \
3442 : CPUMRawGetEFlags(a_pVCpu) )
3443#else
3444# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3445 ( (a_pCtx)->eflags.u )
3446#endif
3447
3448/**
3449 * Updates the EFLAGS in the correct manner wrt. PATM.
3450 *
3451 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3452 * @param a_pCtx The CPU context.
3453 * @param a_fEfl The new EFLAGS.
3454 */
3455#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3456# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3457 do { \
3458 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3459 (a_pCtx)->eflags.u = (a_fEfl); \
3460 else \
3461 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3462 } while (0)
3463#else
3464# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3465 do { \
3466 (a_pCtx)->eflags.u = (a_fEfl); \
3467 } while (0)
3468#endif
3469
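/*
 * Illustrative sketch (not part of this file, not built): EFLAGS updates are
 * expected to go through the accessors above so the raw-mode/PATM special
 * handling is honoured.
 */
#if 0
    uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
    fEfl &= ~X86_EFL_IF;                    /* e.g. an interrupt gate masking IF */
    IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
#endif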
3470
3471/** @} */
3472
3473/** @name Raising Exceptions.
3474 *
3475 * @{
3476 */
3477
3478
3479/**
3480 * Loads the specified stack far pointer from the TSS.
3481 *
3482 * @returns VBox strict status code.
3483 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3484 * @param pCtx The CPU context.
3485 * @param uCpl The CPL to load the stack for.
3486 * @param pSelSS Where to return the new stack segment.
3487 * @param puEsp Where to return the new stack pointer.
3488 */
3489IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3490 PRTSEL pSelSS, uint32_t *puEsp)
3491{
3492 VBOXSTRICTRC rcStrict;
3493 Assert(uCpl < 4);
3494
3495 switch (pCtx->tr.Attr.n.u4Type)
3496 {
3497 /*
3498 * 16-bit TSS (X86TSS16).
3499 */
3500 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); /* fall thru */
3501 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3502 {
3503 uint32_t off = uCpl * 4 + 2;
3504 if (off + 4 <= pCtx->tr.u32Limit)
3505 {
3506 /** @todo check actual access pattern here. */
3507 uint32_t u32Tmp = 0; /* gcc maybe... */
3508 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3509 if (rcStrict == VINF_SUCCESS)
3510 {
3511 *puEsp = RT_LOWORD(u32Tmp);
3512 *pSelSS = RT_HIWORD(u32Tmp);
3513 return VINF_SUCCESS;
3514 }
3515 }
3516 else
3517 {
3518 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3519 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3520 }
3521 break;
3522 }
3523
3524 /*
3525 * 32-bit TSS (X86TSS32).
3526 */
3527 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); /* fall thru */
3528 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3529 {
3530 uint32_t off = uCpl * 8 + 4;
3531 if (off + 7 <= pCtx->tr.u32Limit)
3532 {
3533/** @todo check actual access pattern here. */
3534 uint64_t u64Tmp;
3535 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3536 if (rcStrict == VINF_SUCCESS)
3537 {
3538 *puEsp = u64Tmp & UINT32_MAX;
3539 *pSelSS = (RTSEL)(u64Tmp >> 32);
3540 return VINF_SUCCESS;
3541 }
3542 }
3543 else
3544 {
3545 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3546 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3547 }
3548 break;
3549 }
3550
3551 default:
3552 AssertFailed();
3553 rcStrict = VERR_IEM_IPE_4;
3554 break;
3555 }
3556
3557 *puEsp = 0; /* make gcc happy */
3558 *pSelSS = 0; /* make gcc happy */
3559 return rcStrict;
3560}
3561
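/*
 * Illustrative sketch (not part of this file, not built): the offset arithmetic
 * above (uCpl * 4 + 2 for 16-bit, uCpl * 8 + 4 for 32-bit) is expected to land
 * on the per-CPL stack fields of X86TSS16/X86TSS32; the sp0/ss0/esp0 field
 * names are assumed from the structure layout.
 */
#if 0
    AssertCompile(RT_OFFSETOF(X86TSS16, sp0)  == 2 && RT_OFFSETOF(X86TSS16, ss0) == 4);
    AssertCompile(RT_OFFSETOF(X86TSS32, esp0) == 4 && RT_OFFSETOF(X86TSS32, ss0) == 8);
#endif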
3562
3563/**
3564 * Loads the specified stack pointer from the 64-bit TSS.
3565 *
3566 * @returns VBox strict status code.
3567 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3568 * @param pCtx The CPU context.
3569 * @param uCpl The CPL to load the stack for.
3570 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3571 * @param puRsp Where to return the new stack pointer.
3572 */
3573IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3574{
3575 Assert(uCpl < 4);
3576 Assert(uIst < 8);
3577 *puRsp = 0; /* make gcc happy */
3578
3579 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3580
3581 uint32_t off;
3582 if (uIst)
3583 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3584 else
3585 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3586 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3587 {
3588 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3589 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3590 }
3591
3592 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3593}
3594
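/*
 * Illustrative sketch (not part of this file, not built): how the (uCpl, uIst)
 * selection above maps onto X86TSS64 fields; the rsp2/ist3 field names are
 * assumed from the structure layout.
 */
#if 0
    AssertCompile(RT_OFFSETOF(X86TSS64, rsp0) + 2 * sizeof(uint64_t) == RT_OFFSETOF(X86TSS64, rsp2));
    AssertCompile(RT_OFFSETOF(X86TSS64, ist1) + 2 * sizeof(uint64_t) == RT_OFFSETOF(X86TSS64, ist3));
#endif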
3595
3596/**
3597 * Adjust the CPU state according to the exception being raised.
3598 *
3599 * @param pCtx The CPU context.
3600 * @param u8Vector The exception that has been raised.
3601 */
3602DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3603{
3604 switch (u8Vector)
3605 {
3606 case X86_XCPT_DB:
3607 pCtx->dr[7] &= ~X86_DR7_GD;
3608 break;
3609 /** @todo Read the AMD and Intel exception reference... */
3610 }
3611}
3612
3613
3614/**
3615 * Implements exceptions and interrupts for real mode.
3616 *
3617 * @returns VBox strict status code.
3618 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3619 * @param pCtx The CPU context.
3620 * @param cbInstr The number of bytes to offset rIP by in the return
3621 * address.
3622 * @param u8Vector The interrupt / exception vector number.
3623 * @param fFlags The flags.
3624 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3625 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3626 */
3627IEM_STATIC VBOXSTRICTRC
3628iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3629 PCPUMCTX pCtx,
3630 uint8_t cbInstr,
3631 uint8_t u8Vector,
3632 uint32_t fFlags,
3633 uint16_t uErr,
3634 uint64_t uCr2)
3635{
3636 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3637 NOREF(uErr); NOREF(uCr2);
3638
3639 /*
3640 * Read the IDT entry.
3641 */
3642 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3643 {
3644 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3645 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3646 }
3647 RTFAR16 Idte;
3648 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3649 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3650 return rcStrict;
3651
3652 /*
3653 * Push the stack frame.
3654 */
3655 uint16_t *pu16Frame;
3656 uint64_t uNewRsp;
3657 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3658 if (rcStrict != VINF_SUCCESS)
3659 return rcStrict;
3660
3661 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3662#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3663 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3664 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3665 fEfl |= UINT16_C(0xf000);
3666#endif
3667 pu16Frame[2] = (uint16_t)fEfl;
3668 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3669 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3670 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3671 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3672 return rcStrict;
3673
3674 /*
3675 * Load the vector address into cs:ip and make exception specific state
3676 * adjustments.
3677 */
3678 pCtx->cs.Sel = Idte.sel;
3679 pCtx->cs.ValidSel = Idte.sel;
3680 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3681 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3682 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3683 pCtx->rip = Idte.off;
3684 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3685 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3686
3687 /** @todo do we actually do this in real mode? */
3688 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3689 iemRaiseXcptAdjustState(pCtx, u8Vector);
3690
3691 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3692}
3693
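/*
 * Illustrative sketch (not part of this file, not built): the real-mode frame
 * built above as the guest's handler sees it after dispatch.
 */
#if 0
    /*  SS:SP + 4 : FLAGS image (pushed before IF/TF/AC are cleared in the live EFLAGS)  */
    /*  SS:SP + 2 : return CS                                                            */
    /*  SS:SP + 0 : return IP (advanced by cbInstr for software interrupts)              */
    /*  The new CS:IP comes from the 4-byte IVT entry at idtr.base + u8Vector * 4.       */
#endif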
3694
3695/**
3696 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3697 *
3698 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3699 * @param pSReg Pointer to the segment register.
3700 */
3701IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3702{
3703 pSReg->Sel = 0;
3704 pSReg->ValidSel = 0;
3705 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3706 {
3707 /* VT-x (Intel 3960x) doesn't change the base and limit, but clears and sets the following attributes: */
3708 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3709 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3710 }
3711 else
3712 {
3713 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3714 /** @todo check this on AMD-V */
3715 pSReg->u64Base = 0;
3716 pSReg->u32Limit = 0;
3717 }
3718}
3719
3720
3721/**
3722 * Loads a segment selector during a task switch in V8086 mode.
3723 *
3724 * @param pSReg Pointer to the segment register.
3725 * @param uSel The selector value to load.
3726 */
3727IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3728{
3729 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3730 pSReg->Sel = uSel;
3731 pSReg->ValidSel = uSel;
3732 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3733 pSReg->u64Base = uSel << 4;
3734 pSReg->u32Limit = 0xffff;
3735 pSReg->Attr.u = 0xf3;
3736}
3737
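/*
 * Illustrative sketch (not part of this file, not built): the v8086 mapping
 * above for a concrete selector value.
 */
#if 0
    CPUMSELREG SReg;
    iemHlpLoadSelectorInV86Mode(&SReg, UINT16_C(0xb800));
    Assert(SReg.u64Base == UINT64_C(0xb8000) && SReg.u32Limit == UINT32_C(0xffff) && SReg.Attr.u == UINT32_C(0xf3));
#endif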
3738
3739/**
3740 * Loads a NULL data selector into a selector register, both the hidden and
3741 * visible parts, in protected mode.
3742 *
3743 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3744 * @param pSReg Pointer to the segment register.
3745 * @param uRpl The RPL.
3746 */
3747IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3748{
3749 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3750 * data selector in protected mode. */
3751 pSReg->Sel = uRpl;
3752 pSReg->ValidSel = uRpl;
3753 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3754 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3755 {
3756 /* VT-x (Intel 3960x) observed doing something like this. */
3757 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3758 pSReg->u32Limit = UINT32_MAX;
3759 pSReg->u64Base = 0;
3760 }
3761 else
3762 {
3763 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3764 pSReg->u32Limit = 0;
3765 pSReg->u64Base = 0;
3766 }
3767}
3768
3769
3770/**
3771 * Loads a segment selector during a task switch in protected mode.
3772 *
3773 * In this task switch scenario, we would throw \#TS exceptions rather than
3774 * \#GPs.
3775 *
3776 * @returns VBox strict status code.
3777 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3778 * @param pSReg Pointer to the segment register.
3779 * @param uSel The new selector value.
3780 *
3781 * @remarks This does _not_ handle CS or SS.
3782 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3783 */
3784IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3785{
3786 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3787
3788 /* Null data selector. */
3789 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3790 {
3791 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3792 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3793 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3794 return VINF_SUCCESS;
3795 }
3796
3797 /* Fetch the descriptor. */
3798 IEMSELDESC Desc;
3799 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3800 if (rcStrict != VINF_SUCCESS)
3801 {
3802 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3803 VBOXSTRICTRC_VAL(rcStrict)));
3804 return rcStrict;
3805 }
3806
3807 /* Must be a data segment or readable code segment. */
3808 if ( !Desc.Legacy.Gen.u1DescType
3809 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3810 {
3811 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3812 Desc.Legacy.Gen.u4Type));
3813 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3814 }
3815
3816 /* Check privileges for data segments and non-conforming code segments. */
3817 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3818 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3819 {
3820 /* The RPL and the new CPL must be less than or equal to the DPL. */
3821 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3822 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3823 {
3824 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3825 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3826 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3827 }
3828 }
3829
3830 /* Is it there? */
3831 if (!Desc.Legacy.Gen.u1Present)
3832 {
3833 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3834 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3835 }
3836
3837 /* The base and limit. */
3838 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3839 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3840
3841 /*
3842 * Ok, everything checked out fine. Now set the accessed bit before
3843 * committing the result into the registers.
3844 */
3845 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3846 {
3847 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3848 if (rcStrict != VINF_SUCCESS)
3849 return rcStrict;
3850 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3851 }
3852
3853 /* Commit */
3854 pSReg->Sel = uSel;
3855 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3856 pSReg->u32Limit = cbLimit;
3857 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3858 pSReg->ValidSel = uSel;
3859 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3860 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3861 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3862
3863 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3864 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3865 return VINF_SUCCESS;
3866}
3867
3868
3869/**
3870 * Performs a task switch.
3871 *
3872 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3873 * caller is responsible for performing the necessary checks (like DPL, TSS
3874 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3875 * reference for JMP, CALL, IRET.
3876 *
3877 * If the task switch is due to a software interrupt or hardware exception,
3878 * the caller is responsible for validating the TSS selector and descriptor. See
3879 * Intel Instruction reference for INT n.
3880 *
3881 * @returns VBox strict status code.
3882 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3883 * @param pCtx The CPU context.
3884 * @param enmTaskSwitch What caused this task switch.
3885 * @param uNextEip The EIP effective after the task switch.
3886 * @param fFlags The flags.
3887 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3888 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3889 * @param SelTSS The TSS selector of the new task.
3890 * @param pNewDescTSS Pointer to the new TSS descriptor.
3891 */
3892IEM_STATIC VBOXSTRICTRC
3893iemTaskSwitch(PVMCPU pVCpu,
3894 PCPUMCTX pCtx,
3895 IEMTASKSWITCH enmTaskSwitch,
3896 uint32_t uNextEip,
3897 uint32_t fFlags,
3898 uint16_t uErr,
3899 uint64_t uCr2,
3900 RTSEL SelTSS,
3901 PIEMSELDESC pNewDescTSS)
3902{
3903 Assert(!IEM_IS_REAL_MODE(pVCpu));
3904 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3905
3906 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3907 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3908 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3909 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3910 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3911
3912 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3913 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3914
3915 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3916 fIsNewTSS386, pCtx->eip, uNextEip));
3917
3918 /* Update CR2 in case it's a page-fault. */
3919 /** @todo This should probably be done much earlier in IEM/PGM. See
3920 * @bugref{5653#c49}. */
3921 if (fFlags & IEM_XCPT_FLAGS_CR2)
3922 pCtx->cr2 = uCr2;
3923
3924 /*
3925 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3926 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3927 */
3928 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3929 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3930 if (uNewTSSLimit < uNewTSSLimitMin)
3931 {
3932 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3933 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3934 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3935 }
3936
3937 /*
3938 * Check the current TSS limit. The last written byte to the current TSS during the
3939 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3940 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3941 *
3942 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3943 * end up with smaller than "legal" TSS limits.
3944 */
3945 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3946 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3947 if (uCurTSSLimit < uCurTSSLimitMin)
3948 {
3949 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3950 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3951 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3952 }
3953
3954 /*
3955 * Verify that the new TSS can be accessed and map it. Map only the required contents
3956 * and not the entire TSS.
3957 */
3958 void *pvNewTSS;
3959 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
3960 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
3961 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
3962 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
3963 * not perform correct translation if this happens. See Intel spec. 7.2.1
3964 * "Task-State Segment" */
3965 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
3966 if (rcStrict != VINF_SUCCESS)
3967 {
3968 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
3969 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
3970 return rcStrict;
3971 }
3972
3973 /*
3974 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
3975 */
3976 uint32_t u32EFlags = pCtx->eflags.u32;
3977 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
3978 || enmTaskSwitch == IEMTASKSWITCH_IRET)
3979 {
3980 PX86DESC pDescCurTSS;
3981 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
3982 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3983 if (rcStrict != VINF_SUCCESS)
3984 {
3985 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3986 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3987 return rcStrict;
3988 }
3989
3990 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3991 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
3992 if (rcStrict != VINF_SUCCESS)
3993 {
3994 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3995 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3996 return rcStrict;
3997 }
3998
3999 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4000 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4001 {
4002 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4003 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4004 u32EFlags &= ~X86_EFL_NT;
4005 }
4006 }
4007
4008 /*
4009 * Save the CPU state into the current TSS.
4010 */
4011 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
4012 if (GCPtrNewTSS == GCPtrCurTSS)
4013 {
4014 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4015 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4016 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
4017 }
4018 if (fIsNewTSS386)
4019 {
4020 /*
4021 * Verify that the current TSS (32-bit) can be accessed; map only the minimum required size.
4022 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4023 */
4024 void *pvCurTSS32;
4025 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
4026 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
4027 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4028 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4029 if (rcStrict != VINF_SUCCESS)
4030 {
4031 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4032 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4033 return rcStrict;
4034 }
4035
4036 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4037 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4038 pCurTSS32->eip = uNextEip;
4039 pCurTSS32->eflags = u32EFlags;
4040 pCurTSS32->eax = pCtx->eax;
4041 pCurTSS32->ecx = pCtx->ecx;
4042 pCurTSS32->edx = pCtx->edx;
4043 pCurTSS32->ebx = pCtx->ebx;
4044 pCurTSS32->esp = pCtx->esp;
4045 pCurTSS32->ebp = pCtx->ebp;
4046 pCurTSS32->esi = pCtx->esi;
4047 pCurTSS32->edi = pCtx->edi;
4048 pCurTSS32->es = pCtx->es.Sel;
4049 pCurTSS32->cs = pCtx->cs.Sel;
4050 pCurTSS32->ss = pCtx->ss.Sel;
4051 pCurTSS32->ds = pCtx->ds.Sel;
4052 pCurTSS32->fs = pCtx->fs.Sel;
4053 pCurTSS32->gs = pCtx->gs.Sel;
4054
4055 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4056 if (rcStrict != VINF_SUCCESS)
4057 {
4058 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4059 VBOXSTRICTRC_VAL(rcStrict)));
4060 return rcStrict;
4061 }
4062 }
4063 else
4064 {
4065 /*
4066 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4067 */
4068 void *pvCurTSS16;
4069 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
4070 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
4071 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4072 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4073 if (rcStrict != VINF_SUCCESS)
4074 {
4075 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4076 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4077 return rcStrict;
4078 }
4079
4080 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4081 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4082 pCurTSS16->ip = uNextEip;
4083 pCurTSS16->flags = u32EFlags;
4084 pCurTSS16->ax = pCtx->ax;
4085 pCurTSS16->cx = pCtx->cx;
4086 pCurTSS16->dx = pCtx->dx;
4087 pCurTSS16->bx = pCtx->bx;
4088 pCurTSS16->sp = pCtx->sp;
4089 pCurTSS16->bp = pCtx->bp;
4090 pCurTSS16->si = pCtx->si;
4091 pCurTSS16->di = pCtx->di;
4092 pCurTSS16->es = pCtx->es.Sel;
4093 pCurTSS16->cs = pCtx->cs.Sel;
4094 pCurTSS16->ss = pCtx->ss.Sel;
4095 pCurTSS16->ds = pCtx->ds.Sel;
4096
4097 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4098 if (rcStrict != VINF_SUCCESS)
4099 {
4100 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4101 VBOXSTRICTRC_VAL(rcStrict)));
4102 return rcStrict;
4103 }
4104 }
4105
4106 /*
4107 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4108 */
4109 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4110 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4111 {
4112 /* 16 or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
4113 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4114 pNewTSS->selPrev = pCtx->tr.Sel;
4115 }
4116
4117 /*
4118 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
4119 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4120 */
4121 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4122 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4123 bool fNewDebugTrap;
4124 if (fIsNewTSS386)
4125 {
4126 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4127 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4128 uNewEip = pNewTSS32->eip;
4129 uNewEflags = pNewTSS32->eflags;
4130 uNewEax = pNewTSS32->eax;
4131 uNewEcx = pNewTSS32->ecx;
4132 uNewEdx = pNewTSS32->edx;
4133 uNewEbx = pNewTSS32->ebx;
4134 uNewEsp = pNewTSS32->esp;
4135 uNewEbp = pNewTSS32->ebp;
4136 uNewEsi = pNewTSS32->esi;
4137 uNewEdi = pNewTSS32->edi;
4138 uNewES = pNewTSS32->es;
4139 uNewCS = pNewTSS32->cs;
4140 uNewSS = pNewTSS32->ss;
4141 uNewDS = pNewTSS32->ds;
4142 uNewFS = pNewTSS32->fs;
4143 uNewGS = pNewTSS32->gs;
4144 uNewLdt = pNewTSS32->selLdt;
4145 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4146 }
4147 else
4148 {
4149 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4150 uNewCr3 = 0;
4151 uNewEip = pNewTSS16->ip;
4152 uNewEflags = pNewTSS16->flags;
4153 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4154 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4155 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4156 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4157 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4158 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4159 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4160 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4161 uNewES = pNewTSS16->es;
4162 uNewCS = pNewTSS16->cs;
4163 uNewSS = pNewTSS16->ss;
4164 uNewDS = pNewTSS16->ds;
4165 uNewFS = 0;
4166 uNewGS = 0;
4167 uNewLdt = pNewTSS16->selLdt;
4168 fNewDebugTrap = false;
4169 }
4170
4171 if (GCPtrNewTSS == GCPtrCurTSS)
4172 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4173 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4174
4175 /*
4176 * We're done accessing the new TSS.
4177 */
4178 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4179 if (rcStrict != VINF_SUCCESS)
4180 {
4181 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4182 return rcStrict;
4183 }
4184
4185 /*
4186 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4187 */
4188 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4189 {
4190 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4191 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4192 if (rcStrict != VINF_SUCCESS)
4193 {
4194 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4195 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4196 return rcStrict;
4197 }
4198
4199 /* Check that the descriptor indicates the new TSS is available (not busy). */
4200 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4201 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4202 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4203
4204 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4205 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4206 if (rcStrict != VINF_SUCCESS)
4207 {
4208 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4209 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4210 return rcStrict;
4211 }
4212 }
4213
4214 /*
4215 * From this point on, we're technically in the new task. Exceptions are deferred
4216 * until the task switch completes, but are raised before executing any instructions in the new task.
4217 */
4218 pCtx->tr.Sel = SelTSS;
4219 pCtx->tr.ValidSel = SelTSS;
4220 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
4221 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4222 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4223 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4224 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4225
4226 /* Set the busy bit in TR. */
4227 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4228 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4229 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4230 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4231 {
4232 uNewEflags |= X86_EFL_NT;
4233 }
4234
4235 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4236 pCtx->cr0 |= X86_CR0_TS;
4237 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4238
4239 pCtx->eip = uNewEip;
4240 pCtx->eax = uNewEax;
4241 pCtx->ecx = uNewEcx;
4242 pCtx->edx = uNewEdx;
4243 pCtx->ebx = uNewEbx;
4244 pCtx->esp = uNewEsp;
4245 pCtx->ebp = uNewEbp;
4246 pCtx->esi = uNewEsi;
4247 pCtx->edi = uNewEdi;
4248
4249 uNewEflags &= X86_EFL_LIVE_MASK;
4250 uNewEflags |= X86_EFL_RA1_MASK;
4251 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
4252
4253 /*
4254 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4255 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4256 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4257 */
4258 pCtx->es.Sel = uNewES;
4259 pCtx->es.Attr.u &= ~X86DESCATTR_P;
4260
4261 pCtx->cs.Sel = uNewCS;
4262 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
4263
4264 pCtx->ss.Sel = uNewSS;
4265 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
4266
4267 pCtx->ds.Sel = uNewDS;
4268 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
4269
4270 pCtx->fs.Sel = uNewFS;
4271 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
4272
4273 pCtx->gs.Sel = uNewGS;
4274 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
4275 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4276
4277 pCtx->ldtr.Sel = uNewLdt;
4278 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4279 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
4280 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4281
4282 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4283 {
4284 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
4285 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
4286 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
4287 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
4288 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
4289 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
4290 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4291 }
4292
4293 /*
4294 * Switch CR3 for the new task.
4295 */
4296 if ( fIsNewTSS386
4297 && (pCtx->cr0 & X86_CR0_PG))
4298 {
4299 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4300 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4301 {
4302 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4303 AssertRCSuccessReturn(rc, rc);
4304 }
4305 else
4306 pCtx->cr3 = uNewCr3;
4307
4308 /* Inform PGM. */
4309 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4310 {
4311 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4312 AssertRCReturn(rc, rc);
4313 /* ignore informational status codes */
4314 }
4315 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4316 }
4317
4318 /*
4319 * Switch LDTR for the new task.
4320 */
4321 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4322 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4323 else
4324 {
4325 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4326
4327 IEMSELDESC DescNewLdt;
4328 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4329 if (rcStrict != VINF_SUCCESS)
4330 {
4331 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4332 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4333 return rcStrict;
4334 }
4335 if ( !DescNewLdt.Legacy.Gen.u1Present
4336 || DescNewLdt.Legacy.Gen.u1DescType
4337 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4338 {
4339 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4340 uNewLdt, DescNewLdt.Legacy.u));
4341 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4342 }
4343
4344 pCtx->ldtr.ValidSel = uNewLdt;
4345 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4346 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4347 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4348 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4349 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4350 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4351 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4352 }
4353
4354 IEMSELDESC DescSS;
4355 if (IEM_IS_V86_MODE(pVCpu))
4356 {
4357 pVCpu->iem.s.uCpl = 3;
4358 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4359 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4360 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4361 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4362 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4363 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4364
4365 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4366 DescSS.Legacy.u = 0;
4367 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4368 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4369 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4370 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4371 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4372 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4373 DescSS.Legacy.Gen.u2Dpl = 3;
4374 }
4375 else
4376 {
4377 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4378
4379 /*
4380 * Load the stack segment for the new task.
4381 */
4382 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4383 {
4384 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4385 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4386 }
4387
4388 /* Fetch the descriptor. */
4389 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4390 if (rcStrict != VINF_SUCCESS)
4391 {
4392 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4393 VBOXSTRICTRC_VAL(rcStrict)));
4394 return rcStrict;
4395 }
4396
4397 /* SS must be a data segment and writable. */
4398 if ( !DescSS.Legacy.Gen.u1DescType
4399 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4400 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4401 {
4402 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4403 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4404 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4405 }
4406
4407 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4408 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4409 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4410 {
4411 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4412 uNewCpl));
4413 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4414 }
4415
4416 /* Is it there? */
4417 if (!DescSS.Legacy.Gen.u1Present)
4418 {
4419 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4420 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4421 }
4422
4423 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4424 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4425
4426 /* Set the accessed bit before committing the result into SS. */
4427 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4428 {
4429 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4430 if (rcStrict != VINF_SUCCESS)
4431 return rcStrict;
4432 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4433 }
4434
4435 /* Commit SS. */
4436 pCtx->ss.Sel = uNewSS;
4437 pCtx->ss.ValidSel = uNewSS;
4438 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4439 pCtx->ss.u32Limit = cbLimit;
4440 pCtx->ss.u64Base = u64Base;
4441 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4442 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4443
4444 /* CPL has changed, update IEM before loading rest of segments. */
4445 pVCpu->iem.s.uCpl = uNewCpl;
4446
4447 /*
4448 * Load the data segments for the new task.
4449 */
4450 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4451 if (rcStrict != VINF_SUCCESS)
4452 return rcStrict;
4453 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4454 if (rcStrict != VINF_SUCCESS)
4455 return rcStrict;
4456 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4457 if (rcStrict != VINF_SUCCESS)
4458 return rcStrict;
4459 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4460 if (rcStrict != VINF_SUCCESS)
4461 return rcStrict;
4462
4463 /*
4464 * Load the code segment for the new task.
4465 */
4466 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4467 {
4468 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4469 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4470 }
4471
4472 /* Fetch the descriptor. */
4473 IEMSELDESC DescCS;
4474 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4475 if (rcStrict != VINF_SUCCESS)
4476 {
4477 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4478 return rcStrict;
4479 }
4480
4481 /* CS must be a code segment. */
4482 if ( !DescCS.Legacy.Gen.u1DescType
4483 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4484 {
4485 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4486 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4487 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4488 }
4489
4490 /* For conforming CS, DPL must be less than or equal to the RPL. */
4491 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4492 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4493 {
4494 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4495 DescCS.Legacy.Gen.u2Dpl));
4496 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4497 }
4498
4499 /* For non-conforming CS, DPL must match RPL. */
4500 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4501 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4502 {
4503 Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4504 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4505 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4506 }
4507
4508 /* Is it there? */
4509 if (!DescCS.Legacy.Gen.u1Present)
4510 {
4511 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4512 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4513 }
4514
4515 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4516 u64Base = X86DESC_BASE(&DescCS.Legacy);
4517
4518 /* Set the accessed bit before committing the result into CS. */
4519 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4520 {
4521 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4522 if (rcStrict != VINF_SUCCESS)
4523 return rcStrict;
4524 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4525 }
4526
4527 /* Commit CS. */
4528 pCtx->cs.Sel = uNewCS;
4529 pCtx->cs.ValidSel = uNewCS;
4530 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4531 pCtx->cs.u32Limit = cbLimit;
4532 pCtx->cs.u64Base = u64Base;
4533 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4534 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4535 }
4536
4537 /** @todo Debug trap. */
4538 if (fIsNewTSS386 && fNewDebugTrap)
4539 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4540
4541 /*
4542 * Construct the error code masks based on what caused this task switch.
4543 * See Intel Instruction reference for INT.
4544 */
4545 uint16_t uExt;
4546 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4547 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4548 {
4549 uExt = 1;
4550 }
4551 else
4552 uExt = 0;
4553
4554 /*
4555 * Push any error code on to the new stack.
4556 */
4557 if (fFlags & IEM_XCPT_FLAGS_ERR)
4558 {
4559 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4560 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4561 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4562
4563 /* Check that there is sufficient space on the stack. */
4564 /** @todo Factor out segment limit checking for normal/expand down segments
4565 * into a separate function. */
4566 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4567 {
4568 if ( pCtx->esp - 1 > cbLimitSS
4569 || pCtx->esp < cbStackFrame)
4570 {
4571 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4572 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4573 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4574 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4575 }
4576 }
4577 else
4578 {
4579 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4580 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4581 {
4582 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4583 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4584 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4585 }
4586 }
4587
4588
4589 if (fIsNewTSS386)
4590 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4591 else
4592 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4593 if (rcStrict != VINF_SUCCESS)
4594 {
4595 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4596 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4597 return rcStrict;
4598 }
4599 }
4600
4601 /* Check the new EIP against the new CS limit. */
4602 if (pCtx->eip > pCtx->cs.u32Limit)
4603 {
4604 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4605 pCtx->eip, pCtx->cs.u32Limit));
4606 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4607 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4608 }
4609
4610 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4611 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4612}
4613
4614
4615/**
4616 * Implements exceptions and interrupts for protected mode.
4617 *
4618 * @returns VBox strict status code.
4619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4620 * @param pCtx The CPU context.
4621 * @param cbInstr The number of bytes to offset rIP by in the return
4622 * address.
4623 * @param u8Vector The interrupt / exception vector number.
4624 * @param fFlags The flags.
4625 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4626 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4627 */
4628IEM_STATIC VBOXSTRICTRC
4629iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4630 PCPUMCTX pCtx,
4631 uint8_t cbInstr,
4632 uint8_t u8Vector,
4633 uint32_t fFlags,
4634 uint16_t uErr,
4635 uint64_t uCr2)
4636{
4637 /*
4638 * Read the IDT entry.
4639 */
4640 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4641 {
4642 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4643 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4644 }
4645 X86DESC Idte;
4646 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4647 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4648 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4649 return rcStrict;
4650 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4651 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4652 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4653
4654 /*
4655 * Check the descriptor type, DPL and such.
4656 * ASSUMES this is done in the same order as described for call-gate calls.
4657 */
4658 if (Idte.Gate.u1DescType)
4659 {
4660 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4661 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4662 }
4663 bool fTaskGate = false;
4664 uint8_t f32BitGate = true;
4665 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4666 switch (Idte.Gate.u4Type)
4667 {
4668 case X86_SEL_TYPE_SYS_UNDEFINED:
4669 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4670 case X86_SEL_TYPE_SYS_LDT:
4671 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4672 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4673 case X86_SEL_TYPE_SYS_UNDEFINED2:
4674 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4675 case X86_SEL_TYPE_SYS_UNDEFINED3:
4676 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4677 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4678 case X86_SEL_TYPE_SYS_UNDEFINED4:
4679 {
4680 /** @todo check what actually happens when the type is wrong...
4681 * esp. call gates. */
4682 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4683 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4684 }
4685
4686 case X86_SEL_TYPE_SYS_286_INT_GATE:
4687 f32BitGate = false;
4688 /* fall thru */
4689 case X86_SEL_TYPE_SYS_386_INT_GATE:
4690 fEflToClear |= X86_EFL_IF;
4691 break;
4692
4693 case X86_SEL_TYPE_SYS_TASK_GATE:
4694 fTaskGate = true;
4695#ifndef IEM_IMPLEMENTS_TASKSWITCH
4696 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4697#endif
4698 break;
4699
4700 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4701 f32BitGate = false;
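            /* fall thru */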
4702 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4703 break;
4704
4705 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4706 }
4707
4708 /* Check DPL against CPL if applicable. */
4709 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4710 {
4711 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4712 {
4713 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4714 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4715 }
4716 }
4717
4718 /* Is it there? */
4719 if (!Idte.Gate.u1Present)
4720 {
4721 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4722 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4723 }
4724
4725 /* Is it a task-gate? */
4726 if (fTaskGate)
4727 {
4728 /*
4729 * Construct the error code masks based on what caused this task switch.
4730 * See Intel Instruction reference for INT.
4731 */
4732 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4733 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4734 RTSEL SelTSS = Idte.Gate.u16Sel;
4735
4736 /*
4737 * Fetch the TSS descriptor in the GDT.
4738 */
4739 IEMSELDESC DescTSS;
4740 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4741 if (rcStrict != VINF_SUCCESS)
4742 {
4743 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4744 VBOXSTRICTRC_VAL(rcStrict)));
4745 return rcStrict;
4746 }
4747
4748 /* The TSS descriptor must be a system segment and be available (not busy). */
4749 if ( DescTSS.Legacy.Gen.u1DescType
4750 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4751 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4752 {
4753 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4754 u8Vector, SelTSS, DescTSS.Legacy.au64));
4755 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4756 }
4757
4758 /* The TSS must be present. */
4759 if (!DescTSS.Legacy.Gen.u1Present)
4760 {
4761 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4762 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4763 }
4764
4765 /* Do the actual task switch. */
4766 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4767 }
4768
4769 /* A null CS is bad. */
4770 RTSEL NewCS = Idte.Gate.u16Sel;
4771 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4772 {
4773 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4774 return iemRaiseGeneralProtectionFault0(pVCpu);
4775 }
4776
4777 /* Fetch the descriptor for the new CS. */
4778 IEMSELDESC DescCS;
4779 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4780 if (rcStrict != VINF_SUCCESS)
4781 {
4782 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4783 return rcStrict;
4784 }
4785
4786 /* Must be a code segment. */
4787 if (!DescCS.Legacy.Gen.u1DescType)
4788 {
4789 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4790 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4791 }
4792 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4793 {
4794 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4795 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4796 }
4797
4798 /* Don't allow lowering the privilege level. */
4799 /** @todo Does the lowering of privileges apply to software interrupts
4800 * only? This has a bearing on the more-privileged or
4801 * same-privilege stack behavior further down. A testcase would
4802 * be nice. */
4803 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4804 {
4805 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4806 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4807 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4808 }
4809
4810 /* Make sure the selector is present. */
4811 if (!DescCS.Legacy.Gen.u1Present)
4812 {
4813 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4814 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4815 }
4816
4817 /* Check the new EIP against the new CS limit. */
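    /* A 286 gate only carries a 16-bit offset; a 386 gate combines the low and high offset words. */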
4818 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4819 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4820 ? Idte.Gate.u16OffsetLow
4821 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4822 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4823 if (uNewEip > cbLimitCS)
4824 {
4825 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4826 u8Vector, uNewEip, cbLimitCS, NewCS));
4827 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4828 }
4829 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4830
4831 /* Calc the flag image to push. */
4832 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4833 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4834 fEfl &= ~X86_EFL_RF;
4835 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4836 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4837
4838 /* From V8086 mode only go to CPL 0. */
4839 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4840 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4841 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4842 {
4843 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4844 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4845 }
4846
4847 /*
4848 * If the privilege level changes, we need to get a new stack from the TSS.
4849 * This in turn means validating the new SS and ESP...
4850 */
4851 if (uNewCpl != pVCpu->iem.s.uCpl)
4852 {
4853 RTSEL NewSS;
4854 uint32_t uNewEsp;
4855 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4856 if (rcStrict != VINF_SUCCESS)
4857 return rcStrict;
4858
4859 IEMSELDESC DescSS;
4860 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4861 if (rcStrict != VINF_SUCCESS)
4862 return rcStrict;
4863 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4864 if (!DescSS.Legacy.Gen.u1DefBig)
4865 {
4866 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4867 uNewEsp = (uint16_t)uNewEsp;
4868 }
4869
4870 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4871
4872 /* Check that there is sufficient space for the stack frame. */
4873 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
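        /* Frame: [err,] EIP, CS, EFLAGS, ESP, SS, plus ES/DS/FS/GS when coming from V8086 mode;
           sized in 16-bit words for 286 gates and doubled for 32-bit gates. */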
4874 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4875 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4876 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4877
4878 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4879 {
4880 if ( uNewEsp - 1 > cbLimitSS
4881 || uNewEsp < cbStackFrame)
4882 {
4883 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4884 u8Vector, NewSS, uNewEsp, cbStackFrame));
4885 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4886 }
4887 }
4888 else
4889 {
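            /* Expand-down segment: valid offsets lie above the limit, up to 0xFFFF or 0xFFFFFFFF depending on the D/B bit. */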
4890 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4891 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4892 {
4893 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4894 u8Vector, NewSS, uNewEsp, cbStackFrame));
4895 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4896 }
4897 }
4898
4899 /*
4900 * Start making changes.
4901 */
4902
4903 /* Set the new CPL so that stack accesses use it. */
4904 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4905 pVCpu->iem.s.uCpl = uNewCpl;
4906
4907 /* Create the stack frame. */
4908 RTPTRUNION uStackFrame;
4909 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4910 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4911 if (rcStrict != VINF_SUCCESS)
4912 return rcStrict;
4913 void * const pvStackFrame = uStackFrame.pv;
4914 if (f32BitGate)
4915 {
4916 if (fFlags & IEM_XCPT_FLAGS_ERR)
4917 *uStackFrame.pu32++ = uErr;
4918 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4919 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4920 uStackFrame.pu32[2] = fEfl;
4921 uStackFrame.pu32[3] = pCtx->esp;
4922 uStackFrame.pu32[4] = pCtx->ss.Sel;
4923 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
4924 if (fEfl & X86_EFL_VM)
4925 {
4926 uStackFrame.pu32[1] = pCtx->cs.Sel;
4927 uStackFrame.pu32[5] = pCtx->es.Sel;
4928 uStackFrame.pu32[6] = pCtx->ds.Sel;
4929 uStackFrame.pu32[7] = pCtx->fs.Sel;
4930 uStackFrame.pu32[8] = pCtx->gs.Sel;
4931 }
4932 }
4933 else
4934 {
4935 if (fFlags & IEM_XCPT_FLAGS_ERR)
4936 *uStackFrame.pu16++ = uErr;
4937 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4938 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4939 uStackFrame.pu16[2] = fEfl;
4940 uStackFrame.pu16[3] = pCtx->sp;
4941 uStackFrame.pu16[4] = pCtx->ss.Sel;
4942 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
4943 if (fEfl & X86_EFL_VM)
4944 {
4945 uStackFrame.pu16[1] = pCtx->cs.Sel;
4946 uStackFrame.pu16[5] = pCtx->es.Sel;
4947 uStackFrame.pu16[6] = pCtx->ds.Sel;
4948 uStackFrame.pu16[7] = pCtx->fs.Sel;
4949 uStackFrame.pu16[8] = pCtx->gs.Sel;
4950 }
4951 }
4952 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4953 if (rcStrict != VINF_SUCCESS)
4954 return rcStrict;
4955
4956 /* Mark the selectors 'accessed' (hope this is the correct time). */
4957 * @todo testcase: exactly _when_ are the accessed bits set - before or
4958 * after pushing the stack frame? (Write protect the gdt + stack to
4959 * find out.) */
4960 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4961 {
4962 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4963 if (rcStrict != VINF_SUCCESS)
4964 return rcStrict;
4965 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4966 }
4967
4968 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4969 {
4970 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
4971 if (rcStrict != VINF_SUCCESS)
4972 return rcStrict;
4973 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4974 }
4975
4976 /*
4977 * Start committing the register changes (joins with the DPL=CPL branch).
4978 */
4979 pCtx->ss.Sel = NewSS;
4980 pCtx->ss.ValidSel = NewSS;
4981 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4982 pCtx->ss.u32Limit = cbLimitSS;
4983 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
4984 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4985 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
4986 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
4987 * SP is loaded).
4988 * Need to check the other combinations too:
4989 * - 16-bit TSS, 32-bit handler
4990 * - 32-bit TSS, 16-bit handler */
4991 if (!pCtx->ss.Attr.n.u1DefBig)
4992 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
4993 else
4994 pCtx->rsp = uNewEsp - cbStackFrame;
4995
4996 if (fEfl & X86_EFL_VM)
4997 {
4998 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
4999 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
5000 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
5001 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
5002 }
5003 }
5004 /*
5005 * Same privilege, no stack change and smaller stack frame.
5006 */
5007 else
5008 {
5009 uint64_t uNewRsp;
5010 RTPTRUNION uStackFrame;
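        /* Frame: [err,] EIP, CS, EFLAGS - no stack switch here; doubled for 32-bit gates. */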
5011 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
5012 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5013 if (rcStrict != VINF_SUCCESS)
5014 return rcStrict;
5015 void * const pvStackFrame = uStackFrame.pv;
5016
5017 if (f32BitGate)
5018 {
5019 if (fFlags & IEM_XCPT_FLAGS_ERR)
5020 *uStackFrame.pu32++ = uErr;
5021 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5022 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5023 uStackFrame.pu32[2] = fEfl;
5024 }
5025 else
5026 {
5027 if (fFlags & IEM_XCPT_FLAGS_ERR)
5028 *uStackFrame.pu16++ = uErr;
5029 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5030 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5031 uStackFrame.pu16[2] = fEfl;
5032 }
5033 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5034 if (rcStrict != VINF_SUCCESS)
5035 return rcStrict;
5036
5037 /* Mark the CS selector as 'accessed'. */
5038 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5039 {
5040 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5041 if (rcStrict != VINF_SUCCESS)
5042 return rcStrict;
5043 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5044 }
5045
5046 /*
5047 * Start committing the register changes (joins with the other branch).
5048 */
5049 pCtx->rsp = uNewRsp;
5050 }
5051
5052 /* ... register committing continues. */
5053 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5054 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5055 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5056 pCtx->cs.u32Limit = cbLimitCS;
5057 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5058 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5059
5060 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5061 fEfl &= ~fEflToClear;
5062 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5063
5064 if (fFlags & IEM_XCPT_FLAGS_CR2)
5065 pCtx->cr2 = uCr2;
5066
5067 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5068 iemRaiseXcptAdjustState(pCtx, u8Vector);
5069
5070 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5071}
5072
5073
5074/**
5075 * Implements exceptions and interrupts for long mode.
5076 *
5077 * @returns VBox strict status code.
5078 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5079 * @param pCtx The CPU context.
5080 * @param cbInstr The number of bytes to offset rIP by in the return
5081 * address.
5082 * @param u8Vector The interrupt / exception vector number.
5083 * @param fFlags The flags.
5084 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5085 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5086 */
5087IEM_STATIC VBOXSTRICTRC
5088iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5089 PCPUMCTX pCtx,
5090 uint8_t cbInstr,
5091 uint8_t u8Vector,
5092 uint32_t fFlags,
5093 uint16_t uErr,
5094 uint64_t uCr2)
5095{
5096 /*
5097 * Read the IDT entry.
5098 */
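    /* In long mode each IDT entry is 16 bytes (two quadwords). */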
5099 uint16_t offIdt = (uint16_t)u8Vector << 4;
5100 if (pCtx->idtr.cbIdt < offIdt + 7)
5101 {
5102 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
5103 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5104 }
5105 X86DESC64 Idte;
5106 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
5107 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5108 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
5109 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5110 return rcStrict;
5111 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5112 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5113 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5114
5115 /*
5116 * Check the descriptor type, DPL and such.
5117 * ASSUMES this is done in the same order as described for call-gate calls.
5118 */
5119 if (Idte.Gate.u1DescType)
5120 {
5121 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5122 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5123 }
5124 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5125 switch (Idte.Gate.u4Type)
5126 {
5127 case AMD64_SEL_TYPE_SYS_INT_GATE:
5128 fEflToClear |= X86_EFL_IF;
5129 break;
5130 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5131 break;
5132
5133 default:
5134 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5135 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5136 }
5137
5138 /* Check DPL against CPL if applicable. */
5139 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5140 {
5141 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5142 {
5143 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5144 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5145 }
5146 }
5147
5148 /* Is it there? */
5149 if (!Idte.Gate.u1Present)
5150 {
5151 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5152 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5153 }
5154
5155 /* A null CS is bad. */
5156 RTSEL NewCS = Idte.Gate.u16Sel;
5157 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5158 {
5159 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5160 return iemRaiseGeneralProtectionFault0(pVCpu);
5161 }
5162
5163 /* Fetch the descriptor for the new CS. */
5164 IEMSELDESC DescCS;
5165 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5166 if (rcStrict != VINF_SUCCESS)
5167 {
5168 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5169 return rcStrict;
5170 }
5171
5172 /* Must be a 64-bit code segment. */
5173 if (!DescCS.Long.Gen.u1DescType)
5174 {
5175 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5176 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5177 }
5178 if ( !DescCS.Long.Gen.u1Long
5179 || DescCS.Long.Gen.u1DefBig
5180 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5181 {
5182 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5183 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5184 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5185 }
5186
5187 /* Don't allow lowering the privilege level. For non-conforming CS
5188 selectors, the CS.DPL sets the privilege level the trap/interrupt
5189 handler runs at. For conforming CS selectors, the CPL remains
5190 unchanged, but the CS.DPL must be <= CPL. */
5191 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5192 * when CPU in Ring-0. Result \#GP? */
5193 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5194 {
5195 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5196 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5197 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5198 }
5199
5200
5201 /* Make sure the selector is present. */
5202 if (!DescCS.Legacy.Gen.u1Present)
5203 {
5204 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5205 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5206 }
5207
5208 /* Check that the new RIP is canonical. */
5209 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5210 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5211 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5212 if (!IEM_IS_CANONICAL(uNewRip))
5213 {
5214 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5215 return iemRaiseGeneralProtectionFault0(pVCpu);
5216 }
5217
5218 /*
5219 * If the privilege level changes or if the IST isn't zero, we need to get
5220 * a new stack from the TSS.
5221 */
5222 uint64_t uNewRsp;
5223 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5224 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5225 if ( uNewCpl != pVCpu->iem.s.uCpl
5226 || Idte.Gate.u3IST != 0)
5227 {
5228 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5229 if (rcStrict != VINF_SUCCESS)
5230 return rcStrict;
5231 }
5232 else
5233 uNewRsp = pCtx->rsp;
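    /* The CPU aligns the new RSP down to a 16-byte boundary before pushing the frame in 64-bit mode. */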
5234 uNewRsp &= ~(uint64_t)0xf;
5235
5236 /*
5237 * Calc the flag image to push.
5238 */
5239 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
5240 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5241 fEfl &= ~X86_EFL_RF;
5242 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
5243 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5244
5245 /*
5246 * Start making changes.
5247 */
5248 /* Set the new CPL so that stack accesses use it. */
5249 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5250 pVCpu->iem.s.uCpl = uNewCpl;
5251
5252 /* Create the stack frame. */
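    /* RIP, CS, RFLAGS, RSP and SS are always pushed (5 quadwords); an error code adds a 6th. */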
5253 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
5254 RTPTRUNION uStackFrame;
5255 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5256 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5257 if (rcStrict != VINF_SUCCESS)
5258 return rcStrict;
5259 void * const pvStackFrame = uStackFrame.pv;
5260
5261 if (fFlags & IEM_XCPT_FLAGS_ERR)
5262 *uStackFrame.pu64++ = uErr;
5263 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
5264 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5265 uStackFrame.pu64[2] = fEfl;
5266 uStackFrame.pu64[3] = pCtx->rsp;
5267 uStackFrame.pu64[4] = pCtx->ss.Sel;
5268 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5269 if (rcStrict != VINF_SUCCESS)
5270 return rcStrict;
5271
5272 /* Mark the CS selector as 'accessed' (hope this is the correct time). */
5273 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5274 * after pushing the stack frame? (Write protect the gdt + stack to
5275 * find out.) */
5276 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5277 {
5278 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5279 if (rcStrict != VINF_SUCCESS)
5280 return rcStrict;
5281 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5282 }
5283
5284 /*
5285 * Start committing the register changes.
5286 */
5287 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
5288 * hidden registers when interrupting 32-bit or 16-bit code! */
5289 if (uNewCpl != uOldCpl)
5290 {
5291 pCtx->ss.Sel = 0 | uNewCpl;
5292 pCtx->ss.ValidSel = 0 | uNewCpl;
5293 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5294 pCtx->ss.u32Limit = UINT32_MAX;
5295 pCtx->ss.u64Base = 0;
5296 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5297 }
5298 pCtx->rsp = uNewRsp - cbStackFrame;
5299 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5300 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5301 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5302 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5303 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5304 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5305 pCtx->rip = uNewRip;
5306
5307 fEfl &= ~fEflToClear;
5308 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5309
5310 if (fFlags & IEM_XCPT_FLAGS_CR2)
5311 pCtx->cr2 = uCr2;
5312
5313 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5314 iemRaiseXcptAdjustState(pCtx, u8Vector);
5315
5316 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5317}
5318
5319
5320/**
5321 * Implements exceptions and interrupts.
5322 *
5323 * All exceptions and interrupts go through this function!
5324 *
5325 * @returns VBox strict status code.
5326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5327 * @param cbInstr The number of bytes to offset rIP by in the return
5328 * address.
5329 * @param u8Vector The interrupt / exception vector number.
5330 * @param fFlags The flags.
5331 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5332 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5333 */
5334DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5335iemRaiseXcptOrInt(PVMCPU pVCpu,
5336 uint8_t cbInstr,
5337 uint8_t u8Vector,
5338 uint32_t fFlags,
5339 uint16_t uErr,
5340 uint64_t uCr2)
5341{
5342 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5343#ifdef IN_RING0
5344 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5345 AssertRCReturn(rc, rc);
5346#endif
5347
5348#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5349 /*
5350 * Flush prefetch buffer
5351 */
5352 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5353#endif
5354
5355 /*
5356 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5357 */
5358 if ( pCtx->eflags.Bits.u1VM
5359 && pCtx->eflags.Bits.u2IOPL != 3
5360 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5361 && (pCtx->cr0 & X86_CR0_PE) )
5362 {
5363 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5364 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5365 u8Vector = X86_XCPT_GP;
5366 uErr = 0;
5367 }
5368#ifdef DBGFTRACE_ENABLED
5369 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5370 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5371 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5372#endif
5373
5374#ifdef VBOX_WITH_NESTED_HWVIRT
5375 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
5376 {
5377 /*
5378 * If the event is being injected as part of VMRUN, it isn't subject to event
5379 * intercepts in the nested-guest. However, secondary exceptions that occur
5380 * during injection of any event -are- subject to exception intercepts.
5381 * See AMD spec. 15.20 "Event Injection".
5382 */
5383 if (!pCtx->hwvirt.svm.fInterceptEvents)
5384 pCtx->hwvirt.svm.fInterceptEvents = 1;
5385 else
5386 {
5387 /*
5388 * Check and handle if the event being raised is intercepted.
5389 */
5390 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, pCtx, u8Vector, fFlags, uErr, uCr2);
5391 if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
5392 return rcStrict0;
5393 }
5394 }
5395#endif /* VBOX_WITH_NESTED_HWVIRT */
5396
5397 /*
5398 * Do recursion accounting.
5399 */
5400 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5401 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5402 if (pVCpu->iem.s.cXcptRecursions == 0)
5403 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5404 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5405 else
5406 {
5407 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5408 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5409 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5410
5411 if (pVCpu->iem.s.cXcptRecursions >= 3)
5412 {
5413#ifdef DEBUG_bird
5414 AssertFailed();
5415#endif
5416 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5417 }
5418
5419 /*
5420 * Evaluate the sequence of recurring events.
5421 */
5422 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5423 NULL /* pXcptRaiseInfo */);
5424 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5425 { /* likely */ }
5426 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5427 {
5428 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5429 u8Vector = X86_XCPT_DF;
5430 uErr = 0;
5431 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5432 if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5433 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + X86_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5434 }
5435 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5436 {
5437 Log2(("iemRaiseXcptOrInt: raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5438 return iemInitiateCpuShutdown(pVCpu);
5439 }
5440 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5441 {
5442 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5443 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5444 if (!CPUMIsGuestInNestedHwVirtMode(pCtx))
5445 return VERR_EM_GUEST_CPU_HANG;
5446 }
5447 else
5448 {
5449 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5450 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5451 return VERR_IEM_IPE_9;
5452 }
5453
5454 /*
5455 * The 'EXT' bit is set when an exception occurs during delivery of an external
5456 * event (such as an interrupt or an earlier exception)[1]. The privileged
5457 * software exception (INT1) also sets the EXT bit[2]. For exceptions generated
5458 * by software interrupts and the INTO/INT3 instructions, the 'EXT' bit is not set[3].
5459 *
5460 * [1] - Intel spec. 6.13 "Error Code"
5461 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5462 * [3] - Intel Instruction reference for INT n.
5463 */
5464 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5465 && (fFlags & IEM_XCPT_FLAGS_ERR)
5466 && u8Vector != X86_XCPT_PF
5467 && u8Vector != X86_XCPT_DF)
5468 {
5469 uErr |= X86_TRAP_ERR_EXTERNAL;
5470 }
5471 }
5472
5473 pVCpu->iem.s.cXcptRecursions++;
5474 pVCpu->iem.s.uCurXcpt = u8Vector;
5475 pVCpu->iem.s.fCurXcpt = fFlags;
5476 pVCpu->iem.s.uCurXcptErr = uErr;
5477 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5478
5479 /*
5480 * Extensive logging.
5481 */
5482#if defined(LOG_ENABLED) && defined(IN_RING3)
5483 if (LogIs3Enabled())
5484 {
5485 PVM pVM = pVCpu->CTX_SUFF(pVM);
5486 char szRegs[4096];
5487 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5488 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5489 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5490 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5491 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5492 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5493 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5494 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5495 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5496 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5497 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5498 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5499 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5500 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5501 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5502 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5503 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5504 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5505 " efer=%016VR{efer}\n"
5506 " pat=%016VR{pat}\n"
5507 " sf_mask=%016VR{sf_mask}\n"
5508 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5509 " lstar=%016VR{lstar}\n"
5510 " star=%016VR{star} cstar=%016VR{cstar}\n"
5511 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5512 );
5513
5514 char szInstr[256];
5515 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5516 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5517 szInstr, sizeof(szInstr), NULL);
5518 Log3(("%s%s\n", szRegs, szInstr));
5519 }
5520#endif /* LOG_ENABLED */
5521
5522 /*
5523 * Call the mode specific worker function.
5524 */
5525 VBOXSTRICTRC rcStrict;
5526 if (!(pCtx->cr0 & X86_CR0_PE))
5527 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5528 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5529 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5530 else
5531 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5532
5533 /* Flush the prefetch buffer. */
5534#ifdef IEM_WITH_CODE_TLB
5535 pVCpu->iem.s.pbInstrBuf = NULL;
5536#else
5537 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5538#endif
5539
5540 /*
5541 * Unwind.
5542 */
5543 pVCpu->iem.s.cXcptRecursions--;
5544 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5545 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5546 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5547 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5548 return rcStrict;
5549}
5550
5551#ifdef IEM_WITH_SETJMP
5552/**
5553 * See iemRaiseXcptOrInt. Will not return.
5554 */
5555IEM_STATIC DECL_NO_RETURN(void)
5556iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5557 uint8_t cbInstr,
5558 uint8_t u8Vector,
5559 uint32_t fFlags,
5560 uint16_t uErr,
5561 uint64_t uCr2)
5562{
5563 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5564 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5565}
5566#endif
5567
5568
5569/** \#DE - 00. */
5570DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5571{
5572 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5573}
5574
5575
5576/** \#DB - 01.
5577 * @note This automatically clears DR7.GD. */
5578DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5579{
5580 /** @todo set/clear RF. */
5581 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5582 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5583}
5584
5585
5586/** \#BR - 05. */
5587DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5588{
5589 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5590}
5591
5592
5593/** \#UD - 06. */
5594DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5595{
5596 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5597}
5598
5599
5600/** \#NM - 07. */
5601DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5602{
5603 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5604}
5605
5606
5607/** \#TS(err) - 0a. */
5608DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5609{
5610 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5611}
5612
5613
5614/** \#TS(tr) - 0a. */
5615DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5616{
5617 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5618 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5619}
5620
5621
5622/** \#TS(0) - 0a. */
5623DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5624{
5625 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5626 0, 0);
5627}
5628
5629
5630/** \#TS(err) - 0a. */
5631DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5632{
5633 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5634 uSel & X86_SEL_MASK_OFF_RPL, 0);
5635}
5636
5637
5638/** \#NP(err) - 0b. */
5639DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5640{
5641 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5642}
5643
5644
5645/** \#NP(sel) - 0b. */
5646DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5647{
5648 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5649 uSel & ~X86_SEL_RPL, 0);
5650}
5651
5652
5653/** \#SS(seg) - 0c. */
5654DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5655{
5656 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5657 uSel & ~X86_SEL_RPL, 0);
5658}
5659
5660
5661/** \#SS(err) - 0c. */
5662DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5663{
5664 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5665}
5666
5667
5668/** \#GP(n) - 0d. */
5669DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5670{
5671 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5672}
5673
5674
5675/** \#GP(0) - 0d. */
5676DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5677{
5678 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5679}
5680
5681#ifdef IEM_WITH_SETJMP
5682/** \#GP(0) - 0d. */
5683DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5684{
5685 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5686}
5687#endif
5688
5689
5690/** \#GP(sel) - 0d. */
5691DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5692{
5693 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5694 Sel & ~X86_SEL_RPL, 0);
5695}
5696
5697
5698/** \#GP(0) - 0d. */
5699DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5700{
5701 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5702}
5703
5704
5705/** \#GP(sel) - 0d. */
5706DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5707{
5708 NOREF(iSegReg); NOREF(fAccess);
5709 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5710 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5711}
5712
5713#ifdef IEM_WITH_SETJMP
5714/** \#GP(sel) - 0d, longjmp. */
5715DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5716{
5717 NOREF(iSegReg); NOREF(fAccess);
5718 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5719 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5720}
5721#endif
5722
5723/** \#GP(sel) - 0d. */
5724DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5725{
5726 NOREF(Sel);
5727 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5728}
5729
5730#ifdef IEM_WITH_SETJMP
5731/** \#GP(sel) - 0d, longjmp. */
5732DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5733{
5734 NOREF(Sel);
5735 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5736}
5737#endif
5738
5739
5740/** \#GP(sel) - 0d. */
5741DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5742{
5743 NOREF(iSegReg); NOREF(fAccess);
5744 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5745}
5746
5747#ifdef IEM_WITH_SETJMP
5748/** \#GP(sel) - 0d, longjmp. */
5749DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5750 uint32_t fAccess)
5751{
5752 NOREF(iSegReg); NOREF(fAccess);
5753 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5754}
5755#endif
5756
5757
5758/** \#PF(n) - 0e. */
5759DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5760{
5761 uint16_t uErr;
5762 switch (rc)
5763 {
5764 case VERR_PAGE_NOT_PRESENT:
5765 case VERR_PAGE_TABLE_NOT_PRESENT:
5766 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5767 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5768 uErr = 0;
5769 break;
5770
5771 default:
5772 AssertMsgFailed(("%Rrc\n", rc));
5773 /* fall thru */
5774 case VERR_ACCESS_DENIED:
5775 uErr = X86_TRAP_PF_P;
5776 break;
5777
5778 /** @todo reserved */
5779 }
5780
5781 if (pVCpu->iem.s.uCpl == 3)
5782 uErr |= X86_TRAP_PF_US;
5783
5784 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5785 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5786 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5787 uErr |= X86_TRAP_PF_ID;
5788
5789#if 0 /* This is so much non-sense, really. Why was it done like that? */
5790 /* Note! RW access callers reporting a WRITE protection fault, will clear
5791 the READ flag before calling. So, read-modify-write accesses (RW)
5792 can safely be reported as READ faults. */
5793 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5794 uErr |= X86_TRAP_PF_RW;
5795#else
5796 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5797 {
5798 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5799 uErr |= X86_TRAP_PF_RW;
5800 }
5801#endif
5802
5803 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5804 uErr, GCPtrWhere);
5805}
5806
5807#ifdef IEM_WITH_SETJMP
5808/** \#PF(n) - 0e, longjmp. */
5809IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5810{
5811 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5812}
5813#endif
5814
5815
5816/** \#MF(0) - 10. */
5817DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5818{
5819 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5820}
5821
5822
5823/** \#AC(0) - 11. */
5824DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5825{
5826 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5827}
5828
5829
5830/**
5831 * Macro for calling iemCImplRaiseDivideError().
5832 *
5833 * This enables us to add/remove arguments and force different levels of
5834 * inlining as we wish.
5835 *
5836 * @return Strict VBox status code.
5837 */
5838#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5839IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5840{
5841 NOREF(cbInstr);
5842 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5843}
5844
5845
5846/**
5847 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5848 *
5849 * This enables us to add/remove arguments and force different levels of
5850 * inlining as we wish.
5851 *
5852 * @return Strict VBox status code.
5853 */
5854#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5855IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5856{
5857 NOREF(cbInstr);
5858 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5859}
5860
5861
5862/**
5863 * Macro for calling iemCImplRaiseInvalidOpcode().
5864 *
5865 * This enables us to add/remove arguments and force different levels of
5866 * inlining as we wish.
5867 *
5868 * @return Strict VBox status code.
5869 */
5870#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5871IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5872{
5873 NOREF(cbInstr);
5874 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5875}
5876
5877
5878/** @} */
5879
5880
5881/*
5882 *
5883 * Helper routines.
5884 * Helper routines.
5885 * Helper routines.
5886 *
5887 */
5888
5889/**
5890 * Recalculates the effective operand size.
5891 *
5892 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5893 */
5894IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5895{
5896 switch (pVCpu->iem.s.enmCpuMode)
5897 {
5898 case IEMMODE_16BIT:
5899 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5900 break;
5901 case IEMMODE_32BIT:
5902 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5903 break;
5904 case IEMMODE_64BIT:
5905 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5906 {
5907 case 0:
5908 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5909 break;
5910 case IEM_OP_PRF_SIZE_OP:
5911 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5912 break;
5913 case IEM_OP_PRF_SIZE_REX_W:
5914 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5915 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5916 break;
5917 }
5918 break;
5919 default:
5920 AssertFailed();
5921 }
5922}
5923
5924
5925/**
5926 * Sets the default operand size to 64-bit and recalculates the effective
5927 * operand size.
5928 *
5929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5930 */
5931IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5932{
5933 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5934 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
5935 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5936 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5937 else
5938 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5939}
5940
5941
5942/*
5943 *
5944 * Common opcode decoders.
5945 * Common opcode decoders.
5946 * Common opcode decoders.
5947 *
5948 */
5949//#include <iprt/mem.h>
5950
5951/**
5952 * Used to add extra details about a stub case.
5953 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5954 */
5955IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
5956{
5957#if defined(LOG_ENABLED) && defined(IN_RING3)
5958 PVM pVM = pVCpu->CTX_SUFF(pVM);
5959 char szRegs[4096];
5960 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5961 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5962 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5963 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5964 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5965 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5966 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5967 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5968 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5969 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5970 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5971 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5972 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5973 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5974 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5975 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5976 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5977 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5978 " efer=%016VR{efer}\n"
5979 " pat=%016VR{pat}\n"
5980 " sf_mask=%016VR{sf_mask}\n"
5981 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5982 " lstar=%016VR{lstar}\n"
5983 " star=%016VR{star} cstar=%016VR{cstar}\n"
5984 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5985 );
5986
5987 char szInstr[256];
5988 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5989 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5990 szInstr, sizeof(szInstr), NULL);
5991
5992 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
5993#else
5994 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs, IEM_GET_CTX(pVCpu)->rip);
5995#endif
5996}
5997
5998/**
5999 * Complains about a stub.
6000 *
6001 * Two versions of this macro are provided: one for daily use and one for use
6002 * when working on IEM.
6003 */
6004#if 0
6005# define IEMOP_BITCH_ABOUT_STUB() \
6006 do { \
6007 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6008 iemOpStubMsg2(pVCpu); \
6009 RTAssertPanic(); \
6010 } while (0)
6011#else
6012# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6013#endif
6014
6015/** Stubs an opcode. */
6016#define FNIEMOP_STUB(a_Name) \
6017 FNIEMOP_DEF(a_Name) \
6018 { \
6019 RT_NOREF_PV(pVCpu); \
6020 IEMOP_BITCH_ABOUT_STUB(); \
6021 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6022 } \
6023 typedef int ignore_semicolon
6024
6025/** Stubs an opcode. */
6026#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6027 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6028 { \
6029 RT_NOREF_PV(pVCpu); \
6030 RT_NOREF_PV(a_Name0); \
6031 IEMOP_BITCH_ABOUT_STUB(); \
6032 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6033 } \
6034 typedef int ignore_semicolon
6035
6036/** Stubs an opcode which currently should raise \#UD. */
6037#define FNIEMOP_UD_STUB(a_Name) \
6038 FNIEMOP_DEF(a_Name) \
6039 { \
6040 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6041 return IEMOP_RAISE_INVALID_OPCODE(); \
6042 } \
6043 typedef int ignore_semicolon
6044
6045/** Stubs an opcode which currently should raise \#UD. */
6046#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6047 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6048 { \
6049 RT_NOREF_PV(pVCpu); \
6050 RT_NOREF_PV(a_Name0); \
6051 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6052 return IEMOP_RAISE_INVALID_OPCODE(); \
6053 } \
6054 typedef int ignore_semicolon
6055
6056
6057
6058/** @name Register Access.
6059 * @{
6060 */
6061
6062/**
6063 * Gets a reference (pointer) to the specified hidden segment register.
6064 *
6065 * @returns Hidden register reference.
6066 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6067 * @param iSegReg The segment register.
6068 */
6069IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6070{
6071 Assert(iSegReg < X86_SREG_COUNT);
6072 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6073 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
6074
6075#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6076 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6077 { /* likely */ }
6078 else
6079 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6080#else
6081 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6082#endif
6083 return pSReg;
6084}
6085
6086
6087/**
6088 * Ensures that the given hidden segment register is up to date.
6089 *
6090 * @returns Hidden register reference.
6091 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6092 * @param pSReg The segment register.
6093 */
6094IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6095{
6096#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6097 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6098 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6099#else
6100 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6101 NOREF(pVCpu);
6102#endif
6103 return pSReg;
6104}
6105
6106
6107/**
6108 * Gets a reference (pointer) to the specified segment register (the selector
6109 * value).
6110 *
6111 * @returns Pointer to the selector variable.
6112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6113 * @param iSegReg The segment register.
6114 */
6115DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6116{
6117 Assert(iSegReg < X86_SREG_COUNT);
6118 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6119 return &pCtx->aSRegs[iSegReg].Sel;
6120}
6121
6122
6123/**
6124 * Fetches the selector value of a segment register.
6125 *
6126 * @returns The selector value.
6127 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6128 * @param iSegReg The segment register.
6129 */
6130DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6131{
6132 Assert(iSegReg < X86_SREG_COUNT);
6133 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
6134}
6135
6136
6137/**
6138 * Gets a reference (pointer) to the specified general purpose register.
6139 *
6140 * @returns Register reference.
6141 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6142 * @param iReg The general purpose register.
6143 */
6144DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6145{
6146 Assert(iReg < 16);
6147 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6148 return &pCtx->aGRegs[iReg];
6149}
6150
6151
6152/**
6153 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6154 *
6155 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6156 *
6157 * @returns Register reference.
6158 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6159 * @param iReg The register.
6160 */
6161DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6162{
6163 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6164 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6165 {
6166 Assert(iReg < 16);
6167 return &pCtx->aGRegs[iReg].u8;
6168 }
6169 /* high 8-bit register. */
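    /* Without a REX prefix, encodings 4-7 select AH, CH, DH and BH, i.e. the high byte of the first four GPRs. */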
6170 Assert(iReg < 8);
6171 return &pCtx->aGRegs[iReg & 3].bHi;
6172}
6173
6174
6175/**
6176 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6177 *
6178 * @returns Register reference.
6179 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6180 * @param iReg The register.
6181 */
6182DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6183{
6184 Assert(iReg < 16);
6185 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6186 return &pCtx->aGRegs[iReg].u16;
6187}
6188
6189
6190/**
6191 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6192 *
6193 * @returns Register reference.
6194 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6195 * @param iReg The register.
6196 */
6197DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6198{
6199 Assert(iReg < 16);
6200 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6201 return &pCtx->aGRegs[iReg].u32;
6202}
6203
6204
6205/**
6206 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6207 *
6208 * @returns Register reference.
6209 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6210 * @param iReg The register.
6211 */
6212DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6213{
    Assert(iReg < 16);
6215 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6216 return &pCtx->aGRegs[iReg].u64;
6217}
6218
6219
6220/**
 * Fetches the value of an 8-bit general purpose register.
6222 *
6223 * @returns The register value.
6224 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6225 * @param iReg The register.
6226 */
6227DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6228{
6229 return *iemGRegRefU8(pVCpu, iReg);
6230}
6231
6232
6233/**
6234 * Fetches the value of a 16-bit general purpose register.
6235 *
6236 * @returns The register value.
6237 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6238 * @param iReg The register.
6239 */
6240DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6241{
6242 Assert(iReg < 16);
6243 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
6244}
6245
6246
6247/**
6248 * Fetches the value of a 32-bit general purpose register.
6249 *
6250 * @returns The register value.
6251 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6252 * @param iReg The register.
6253 */
6254DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6255{
6256 Assert(iReg < 16);
6257 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
6258}
6259
6260
6261/**
6262 * Fetches the value of a 64-bit general purpose register.
6263 *
6264 * @returns The register value.
6265 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6266 * @param iReg The register.
6267 */
6268DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6269{
6270 Assert(iReg < 16);
6271 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
6272}
6273
6274
6275/**
 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
 *
 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
 * segment limit.
 *
 * @returns Strict VBox status code.
6281 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6282 * @param offNextInstr The offset of the next instruction.
6283 */
6284IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6285{
6286 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6287 switch (pVCpu->iem.s.enmEffOpSize)
6288 {
6289 case IEMMODE_16BIT:
6290 {
6291 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6292 if ( uNewIp > pCtx->cs.u32Limit
6293 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6294 return iemRaiseGeneralProtectionFault0(pVCpu);
6295 pCtx->rip = uNewIp;
6296 break;
6297 }
6298
6299 case IEMMODE_32BIT:
6300 {
6301 Assert(pCtx->rip <= UINT32_MAX);
6302 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6303
6304 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6305 if (uNewEip > pCtx->cs.u32Limit)
6306 return iemRaiseGeneralProtectionFault0(pVCpu);
6307 pCtx->rip = uNewEip;
6308 break;
6309 }
6310
6311 case IEMMODE_64BIT:
6312 {
6313 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6314
6315 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6316 if (!IEM_IS_CANONICAL(uNewRip))
6317 return iemRaiseGeneralProtectionFault0(pVCpu);
6318 pCtx->rip = uNewRip;
6319 break;
6320 }
6321
6322 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6323 }
6324
6325 pCtx->eflags.Bits.u1RF = 0;
6326
6327#ifndef IEM_WITH_CODE_TLB
6328 /* Flush the prefetch buffer. */
6329 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6330#endif
6331
6332 return VINF_SUCCESS;
6333}
6334
6335
6336/**
6337 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6338 *
6339 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6340 * segment limit.
6341 *
6342 * @returns Strict VBox status code.
6343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6344 * @param offNextInstr The offset of the next instruction.
6345 */
6346IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6347{
6348 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6349 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6350
6351 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6352 if ( uNewIp > pCtx->cs.u32Limit
6353 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6354 return iemRaiseGeneralProtectionFault0(pVCpu);
    /** @todo Test 16-bit jump in 64-bit mode. Is that even possible? */
6356 pCtx->rip = uNewIp;
6357 pCtx->eflags.Bits.u1RF = 0;
6358
6359#ifndef IEM_WITH_CODE_TLB
6360 /* Flush the prefetch buffer. */
6361 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6362#endif
6363
6364 return VINF_SUCCESS;
6365}
6366
6367
6368/**
6369 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6370 *
6371 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6372 * segment limit.
6373 *
6374 * @returns Strict VBox status code.
6375 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6376 * @param offNextInstr The offset of the next instruction.
6377 */
6378IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6379{
6380 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6381 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6382
6383 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6384 {
6385 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6386
6387 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6388 if (uNewEip > pCtx->cs.u32Limit)
6389 return iemRaiseGeneralProtectionFault0(pVCpu);
6390 pCtx->rip = uNewEip;
6391 }
6392 else
6393 {
6394 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6395
6396 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6397 if (!IEM_IS_CANONICAL(uNewRip))
6398 return iemRaiseGeneralProtectionFault0(pVCpu);
6399 pCtx->rip = uNewRip;
6400 }
6401 pCtx->eflags.Bits.u1RF = 0;
6402
6403#ifndef IEM_WITH_CODE_TLB
6404 /* Flush the prefetch buffer. */
6405 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6406#endif
6407
6408 return VINF_SUCCESS;
6409}
6410
6411
6412/**
6413 * Performs a near jump to the specified address.
6414 *
6415 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6416 * segment limit.
6417 *
6418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6419 * @param uNewRip The new RIP value.
6420 */
6421IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6422{
6423 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6424 switch (pVCpu->iem.s.enmEffOpSize)
6425 {
6426 case IEMMODE_16BIT:
6427 {
6428 Assert(uNewRip <= UINT16_MAX);
6429 if ( uNewRip > pCtx->cs.u32Limit
6430 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6431 return iemRaiseGeneralProtectionFault0(pVCpu);
6432 /** @todo Test 16-bit jump in 64-bit mode. */
6433 pCtx->rip = uNewRip;
6434 break;
6435 }
6436
6437 case IEMMODE_32BIT:
6438 {
6439 Assert(uNewRip <= UINT32_MAX);
6440 Assert(pCtx->rip <= UINT32_MAX);
6441 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6442
6443 if (uNewRip > pCtx->cs.u32Limit)
6444 return iemRaiseGeneralProtectionFault0(pVCpu);
6445 pCtx->rip = uNewRip;
6446 break;
6447 }
6448
6449 case IEMMODE_64BIT:
6450 {
6451 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6452
6453 if (!IEM_IS_CANONICAL(uNewRip))
6454 return iemRaiseGeneralProtectionFault0(pVCpu);
6455 pCtx->rip = uNewRip;
6456 break;
6457 }
6458
6459 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6460 }
6461
6462 pCtx->eflags.Bits.u1RF = 0;
6463
6464#ifndef IEM_WITH_CODE_TLB
6465 /* Flush the prefetch buffer. */
6466 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6467#endif
6468
6469 return VINF_SUCCESS;
6470}
6471
6472
6473/**
 * Gets the address of the top of the stack.
 *
 * @returns The current top of stack address (SP/ESP/RSP depending on mode).
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param pCtx The CPU context from which SP/ESP/RSP should be
 * read.
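 * @remarks In 64-bit mode the full RSP is always used; otherwise SS.ATTR.B
 *          selects between ESP (big stack) and SP (small stack).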
6479 */
6480DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6481{
6482 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6483 return pCtx->rsp;
6484 if (pCtx->ss.Attr.n.u1DefBig)
6485 return pCtx->esp;
6486 return pCtx->sp;
6487}
6488
6489
6490/**
6491 * Updates the RIP/EIP/IP to point to the next instruction.
6492 *
6493 * This function leaves the EFLAGS.RF flag alone.
6494 *
6495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6496 * @param cbInstr The number of bytes to add.
6497 */
6498IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6499{
6500 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6501 switch (pVCpu->iem.s.enmCpuMode)
6502 {
6503 case IEMMODE_16BIT:
6504 Assert(pCtx->rip <= UINT16_MAX);
6505 pCtx->eip += cbInstr;
6506 pCtx->eip &= UINT32_C(0xffff);
6507 break;
6508
6509 case IEMMODE_32BIT:
6510 pCtx->eip += cbInstr;
6511 Assert(pCtx->rip <= UINT32_MAX);
6512 break;
6513
6514 case IEMMODE_64BIT:
6515 pCtx->rip += cbInstr;
6516 break;
6517 default: AssertFailed();
6518 }
6519}
6520
6521
6522#if 0
6523/**
6524 * Updates the RIP/EIP/IP to point to the next instruction.
6525 *
6526 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6527 */
6528IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6529{
6530 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6531}
6532#endif
6533
6534
6535
6536/**
6537 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6538 *
6539 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6540 * @param cbInstr The number of bytes to add.
6541 */
6542IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6543{
6544 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6545
6546 pCtx->eflags.Bits.u1RF = 0;
6547
6548 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6549#if ARCH_BITS >= 64
6550 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
6551 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6552 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6553#else
6554 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6555 pCtx->rip += cbInstr;
6556 else
6557 {
6558 static uint32_t const s_aEipMasks[] = { UINT32_C(0xffff), UINT32_MAX };
6559 pCtx->eip = (pCtx->eip + cbInstr) & s_aEipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6560 }
6561#endif
6562}
6563
6564
6565/**
6566 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6567 *
6568 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6569 */
6570IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6571{
6572 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6573}
6574
6575
6576/**
6577 * Adds to the stack pointer.
6578 *
6579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param pCtx The CPU context in which SP/ESP/RSP should be
 * updated.
6582 * @param cbToAdd The number of bytes to add (8-bit!).
6583 */
6584DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6585{
6586 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6587 pCtx->rsp += cbToAdd;
6588 else if (pCtx->ss.Attr.n.u1DefBig)
6589 pCtx->esp += cbToAdd;
6590 else
6591 pCtx->sp += cbToAdd;
6592}
6593
6594
6595/**
6596 * Subtracts from the stack pointer.
6597 *
6598 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param pCtx The CPU context in which SP/ESP/RSP should be
 * updated.
6601 * @param cbToSub The number of bytes to subtract (8-bit!).
6602 */
6603DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6604{
6605 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6606 pCtx->rsp -= cbToSub;
6607 else if (pCtx->ss.Attr.n.u1DefBig)
6608 pCtx->esp -= cbToSub;
6609 else
6610 pCtx->sp -= cbToSub;
6611}
6612
6613
6614/**
6615 * Adds to the temporary stack pointer.
6616 *
6617 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6618 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6619 * @param cbToAdd The number of bytes to add (16-bit).
6620 * @param pCtx Where to get the current stack mode.
6621 */
6622DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6623{
6624 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6625 pTmpRsp->u += cbToAdd;
6626 else if (pCtx->ss.Attr.n.u1DefBig)
6627 pTmpRsp->DWords.dw0 += cbToAdd;
6628 else
6629 pTmpRsp->Words.w0 += cbToAdd;
6630}
6631
6632
6633/**
6634 * Subtracts from the temporary stack pointer.
6635 *
6636 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6637 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6638 * @param cbToSub The number of bytes to subtract.
6639 * @param pCtx Where to get the current stack mode.
 * @remarks The @a cbToSub argument *MUST* be 16-bit; iemCImpl_enter is
 * expecting that.
6642 */
6643DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6644{
6645 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6646 pTmpRsp->u -= cbToSub;
6647 else if (pCtx->ss.Attr.n.u1DefBig)
6648 pTmpRsp->DWords.dw0 -= cbToSub;
6649 else
6650 pTmpRsp->Words.w0 -= cbToSub;
6651}
6652
6653
6654/**
6655 * Calculates the effective stack address for a push of the specified size as
6656 * well as the new RSP value (upper bits may be masked).
6657 *
 * @returns The effective stack address for the push.
6659 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6660 * @param pCtx Where to get the current stack mode.
 * @param cbItem The size of the stack item to push.
6662 * @param puNewRsp Where to return the new RSP value.
6663 */
6664DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6665{
6666 RTUINT64U uTmpRsp;
6667 RTGCPTR GCPtrTop;
6668 uTmpRsp.u = pCtx->rsp;
6669
6670 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6671 GCPtrTop = uTmpRsp.u -= cbItem;
6672 else if (pCtx->ss.Attr.n.u1DefBig)
6673 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6674 else
6675 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6676 *puNewRsp = uTmpRsp.u;
6677 return GCPtrTop;
6678}
6679
6680
6681/**
6682 * Gets the current stack pointer and calculates the value after a pop of the
6683 * specified size.
6684 *
6685 * @returns Current stack pointer.
6686 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6687 * @param pCtx Where to get the current stack mode.
6688 * @param cbItem The size of the stack item to pop.
6689 * @param puNewRsp Where to return the new RSP value.
6690 */
6691DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6692{
6693 RTUINT64U uTmpRsp;
6694 RTGCPTR GCPtrTop;
6695 uTmpRsp.u = pCtx->rsp;
6696
6697 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6698 {
6699 GCPtrTop = uTmpRsp.u;
6700 uTmpRsp.u += cbItem;
6701 }
6702 else if (pCtx->ss.Attr.n.u1DefBig)
6703 {
6704 GCPtrTop = uTmpRsp.DWords.dw0;
6705 uTmpRsp.DWords.dw0 += cbItem;
6706 }
6707 else
6708 {
6709 GCPtrTop = uTmpRsp.Words.w0;
6710 uTmpRsp.Words.w0 += cbItem;
6711 }
6712 *puNewRsp = uTmpRsp.u;
6713 return GCPtrTop;
6714}
6715
6716
6717/**
6718 * Calculates the effective stack address for a push of the specified size as
6719 * well as the new temporary RSP value (upper bits may be masked).
6720 *
 * @returns The effective stack address for the push.
6722 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6723 * @param pCtx Where to get the current stack mode.
6724 * @param pTmpRsp The temporary stack pointer. This is updated.
 * @param cbItem The size of the stack item to push.
6726 */
6727DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6728{
6729 RTGCPTR GCPtrTop;
6730
6731 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6732 GCPtrTop = pTmpRsp->u -= cbItem;
6733 else if (pCtx->ss.Attr.n.u1DefBig)
6734 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6735 else
6736 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6737 return GCPtrTop;
6738}
6739
6740
6741/**
6742 * Gets the effective stack address for a pop of the specified size and
6743 * calculates and updates the temporary RSP.
6744 *
6745 * @returns Current stack pointer.
6746 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6747 * @param pCtx Where to get the current stack mode.
6748 * @param pTmpRsp The temporary stack pointer. This is updated.
6749 * @param cbItem The size of the stack item to pop.
6750 */
6751DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6752{
6753 RTGCPTR GCPtrTop;
6754 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6755 {
6756 GCPtrTop = pTmpRsp->u;
6757 pTmpRsp->u += cbItem;
6758 }
6759 else if (pCtx->ss.Attr.n.u1DefBig)
6760 {
6761 GCPtrTop = pTmpRsp->DWords.dw0;
6762 pTmpRsp->DWords.dw0 += cbItem;
6763 }
6764 else
6765 {
6766 GCPtrTop = pTmpRsp->Words.w0;
6767 pTmpRsp->Words.w0 += cbItem;
6768 }
6769 return GCPtrTop;
6770}
6771
6772/** @} */
6773
6774
6775/** @name FPU access and helpers.
6776 *
6777 * @{
6778 */
6779
6780
6781/**
6782 * Hook for preparing to use the host FPU.
6783 *
6784 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6785 *
6786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6787 */
6788DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6789{
6790#ifdef IN_RING3
6791 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6792#else
6793 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6794#endif
6795}
6796
6797
6798/**
6799 * Hook for preparing to use the host FPU for SSE.
6800 *
6801 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6802 *
6803 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6804 */
6805DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6806{
6807 iemFpuPrepareUsage(pVCpu);
6808}
6809
6810
6811/**
6812 * Hook for preparing to use the host FPU for AVX.
6813 *
6814 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6815 *
6816 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6817 */
6818DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6819{
6820 iemFpuPrepareUsage(pVCpu);
6821}
6822
6823
6824/**
6825 * Hook for actualizing the guest FPU state before the interpreter reads it.
6826 *
6827 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6828 *
6829 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6830 */
6831DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6832{
6833#ifdef IN_RING3
6834 NOREF(pVCpu);
6835#else
6836 CPUMRZFpuStateActualizeForRead(pVCpu);
6837#endif
6838}
6839
6840
6841/**
6842 * Hook for actualizing the guest FPU state before the interpreter changes it.
6843 *
6844 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6845 *
6846 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6847 */
6848DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6849{
6850#ifdef IN_RING3
6851 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6852#else
6853 CPUMRZFpuStateActualizeForChange(pVCpu);
6854#endif
6855}
6856
6857
6858/**
6859 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6860 * only.
6861 *
6862 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6863 *
6864 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6865 */
6866DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6867{
6868#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6869 NOREF(pVCpu);
6870#else
6871 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6872#endif
6873}
6874
6875
6876/**
6877 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6878 * read+write.
6879 *
6880 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6881 *
6882 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6883 */
6884DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6885{
6886#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6887 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6888#else
6889 CPUMRZFpuStateActualizeForChange(pVCpu);
6890#endif
6891}
6892
6893
6894/**
6895 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
6896 * only.
6897 *
6898 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6899 *
6900 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6901 */
6902DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
6903{
6904#ifdef IN_RING3
6905 NOREF(pVCpu);
6906#else
6907 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
6908#endif
6909}
6910
6911
6912/**
6913 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
6914 * read+write.
6915 *
6916 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6917 *
6918 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6919 */
6920DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
6921{
6922#ifdef IN_RING3
6923 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6924#else
6925 CPUMRZFpuStateActualizeForChange(pVCpu);
6926#endif
6927}
6928
6929
6930/**
 * Stores a QNaN value into an FPU register.
6932 *
6933 * @param pReg Pointer to the register.
6934 */
6935DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6936{
6937 pReg->au32[0] = UINT32_C(0x00000000);
6938 pReg->au32[1] = UINT32_C(0xc0000000);
6939 pReg->au16[4] = UINT16_C(0xffff);
6940}
6941
6942
6943/**
6944 * Updates the FOP, FPU.CS and FPUIP registers.
6945 *
6946 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6947 * @param pCtx The CPU context.
6948 * @param pFpuCtx The FPU context.
6949 */
6950DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
6951{
6952 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
6953 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
    /** @todo x87.CS and FPUIP need to be kept separately. */
6955 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6956 {
6957 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
6958 * happens in real mode here based on the fnsave and fnstenv images. */
6959 pFpuCtx->CS = 0;
6960 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
6961 }
6962 else
6963 {
6964 pFpuCtx->CS = pCtx->cs.Sel;
6965 pFpuCtx->FPUIP = pCtx->rip;
6966 }
6967}
6968
6969
6970/**
6971 * Updates the x87.DS and FPUDP registers.
6972 *
6973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6974 * @param pCtx The CPU context.
6975 * @param pFpuCtx The FPU context.
6976 * @param iEffSeg The effective segment register.
6977 * @param GCPtrEff The effective address relative to @a iEffSeg.
6978 */
6979DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6980{
6981 RTSEL sel;
6982 switch (iEffSeg)
6983 {
6984 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
6985 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
6986 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
6987 case X86_SREG_ES: sel = pCtx->es.Sel; break;
6988 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
6989 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
6990 default:
6991 AssertMsgFailed(("%d\n", iEffSeg));
6992 sel = pCtx->ds.Sel;
6993 }
    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
6995 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6996 {
6997 pFpuCtx->DS = 0;
6998 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
6999 }
7000 else
7001 {
7002 pFpuCtx->DS = sel;
7003 pFpuCtx->FPUDP = GCPtrEff;
7004 }
7005}
7006
7007
7008/**
7009 * Rotates the stack registers in the push direction.
7010 *
7011 * @param pFpuCtx The FPU context.
7012 * @remarks This is a complete waste of time, but fxsave stores the registers in
7013 * stack order.
7014 */
7015DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7016{
7017 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7018 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7019 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7020 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7021 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7022 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7023 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7024 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7025 pFpuCtx->aRegs[0].r80 = r80Tmp;
7026}
7027
7028
7029/**
7030 * Rotates the stack registers in the pop direction.
7031 *
7032 * @param pFpuCtx The FPU context.
7033 * @remarks This is a complete waste of time, but fxsave stores the registers in
7034 * stack order.
7035 */
7036DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7037{
7038 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7039 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7040 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7041 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7042 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7043 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7044 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7045 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7046 pFpuCtx->aRegs[7].r80 = r80Tmp;
7047}
7048
7049
7050/**
 * Updates FSW and pushes an FPU result onto the FPU stack if no pending
7052 * exception prevents it.
7053 *
7054 * @param pResult The FPU operation result to push.
7055 * @param pFpuCtx The FPU context.
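 * @remarks An x87 push decrements TOP by one (modulo 8), which is why the new
 *          top is computed as (TOP + 7) & 7 below.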
7056 */
7057IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7058{
7059 /* Update FSW and bail if there are pending exceptions afterwards. */
7060 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7061 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7062 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7063 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7064 {
7065 pFpuCtx->FSW = fFsw;
7066 return;
7067 }
7068
7069 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7070 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7071 {
7072 /* All is fine, push the actual value. */
7073 pFpuCtx->FTW |= RT_BIT(iNewTop);
7074 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7075 }
7076 else if (pFpuCtx->FCW & X86_FCW_IM)
7077 {
7078 /* Masked stack overflow, push QNaN. */
7079 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7080 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7081 }
7082 else
7083 {
7084 /* Raise stack overflow, don't push anything. */
7085 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7086 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7087 return;
7088 }
7089
7090 fFsw &= ~X86_FSW_TOP_MASK;
7091 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7092 pFpuCtx->FSW = fFsw;
7093
7094 iemFpuRotateStackPush(pFpuCtx);
7095}
7096
7097
7098/**
 * Stores a result in an FPU register and updates the FSW and FTW.
7100 *
7101 * @param pFpuCtx The FPU context.
7102 * @param pResult The result to store.
7103 * @param iStReg Which FPU register to store it in.
7104 */
7105IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7106{
7107 Assert(iStReg < 8);
7108 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7109 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7110 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7111 pFpuCtx->FTW |= RT_BIT(iReg);
7112 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7113}
7114
7115
7116/**
7117 * Only updates the FPU status word (FSW) with the result of the current
7118 * instruction.
7119 *
7120 * @param pFpuCtx The FPU context.
7121 * @param u16FSW The FSW output of the current instruction.
7122 */
7123IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7124{
7125 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7126 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7127}
7128
7129
7130/**
7131 * Pops one item off the FPU stack if no pending exception prevents it.
7132 *
7133 * @param pFpuCtx The FPU context.
7134 */
7135IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7136{
7137 /* Check pending exceptions. */
7138 uint16_t uFSW = pFpuCtx->FSW;
7139 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7140 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7141 return;
7142
    /* TOP++ (a pop increments TOP, modulo 8). */
7144 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7145 uFSW &= ~X86_FSW_TOP_MASK;
7146 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7147 pFpuCtx->FSW = uFSW;
7148
7149 /* Mark the previous ST0 as empty. */
7150 iOldTop >>= X86_FSW_TOP_SHIFT;
7151 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7152
7153 /* Rotate the registers. */
7154 iemFpuRotateStackPop(pFpuCtx);
7155}
7156
7157
7158/**
 * Pushes an FPU result onto the FPU stack if no pending exception prevents it.
7160 *
7161 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7162 * @param pResult The FPU operation result to push.
7163 */
7164IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7165{
7166 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7167 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7168 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7169 iemFpuMaybePushResult(pResult, pFpuCtx);
7170}
7171
7172
7173/**
 * Pushes an FPU result onto the FPU stack if no pending exception prevents it,
7175 * and sets FPUDP and FPUDS.
7176 *
7177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7178 * @param pResult The FPU operation result to push.
7179 * @param iEffSeg The effective segment register.
7180 * @param GCPtrEff The effective address relative to @a iEffSeg.
7181 */
7182IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7183{
7184 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7185 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7186 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7187 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7188 iemFpuMaybePushResult(pResult, pFpuCtx);
7189}
7190
7191
7192/**
 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
 * unless a pending exception prevents it.
7195 *
7196 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7197 * @param pResult The FPU operation result to store and push.
7198 */
7199IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7200{
7201 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7202 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7203 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7204
7205 /* Update FSW and bail if there are pending exceptions afterwards. */
7206 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7207 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7208 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7209 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7210 {
7211 pFpuCtx->FSW = fFsw;
7212 return;
7213 }
7214
7215 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7216 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7217 {
7218 /* All is fine, push the actual value. */
7219 pFpuCtx->FTW |= RT_BIT(iNewTop);
7220 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7221 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7222 }
7223 else if (pFpuCtx->FCW & X86_FCW_IM)
7224 {
7225 /* Masked stack overflow, push QNaN. */
7226 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7227 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7228 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7229 }
7230 else
7231 {
7232 /* Raise stack overflow, don't push anything. */
7233 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7234 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7235 return;
7236 }
7237
7238 fFsw &= ~X86_FSW_TOP_MASK;
7239 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7240 pFpuCtx->FSW = fFsw;
7241
7242 iemFpuRotateStackPush(pFpuCtx);
7243}
7244
7245
7246/**
 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7248 * FOP.
7249 *
7250 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7251 * @param pResult The result to store.
7252 * @param iStReg Which FPU register to store it in.
7253 */
7254IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7255{
7256 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7257 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7258 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7259 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7260}
7261
7262
7263/**
 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7265 * FOP, and then pops the stack.
7266 *
7267 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7268 * @param pResult The result to store.
7269 * @param iStReg Which FPU register to store it in.
7270 */
7271IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7272{
7273 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7274 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7275 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7276 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7277 iemFpuMaybePopOne(pFpuCtx);
7278}
7279
7280
7281/**
 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7283 * FPUDP, and FPUDS.
7284 *
7285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7286 * @param pResult The result to store.
7287 * @param iStReg Which FPU register to store it in.
7288 * @param iEffSeg The effective memory operand selector register.
7289 * @param GCPtrEff The effective memory operand offset.
7290 */
7291IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7292 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7293{
7294 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7295 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7296 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7297 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7298 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7299}
7300
7301
7302/**
 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7304 * FPUDP, and FPUDS, and then pops the stack.
7305 *
7306 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7307 * @param pResult The result to store.
7308 * @param iStReg Which FPU register to store it in.
7309 * @param iEffSeg The effective memory operand selector register.
7310 * @param GCPtrEff The effective memory operand offset.
7311 */
7312IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7313 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7314{
7315 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7316 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7317 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7318 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7319 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7320 iemFpuMaybePopOne(pFpuCtx);
7321}
7322
7323
7324/**
7325 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7326 *
7327 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7328 */
7329IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7330{
7331 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7332 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7333 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7334}
7335
7336
7337/**
7338 * Marks the specified stack register as free (for FFREE).
7339 *
7340 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7341 * @param iStReg The register to free.
7342 */
7343IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7344{
7345 Assert(iStReg < 8);
7346 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7347 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7348 pFpuCtx->FTW &= ~RT_BIT(iReg);
7349}
7350
7351
7352/**
7353 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7354 *
7355 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7356 */
7357IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7358{
7359 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7360 uint16_t uFsw = pFpuCtx->FSW;
7361 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7362 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7363 uFsw &= ~X86_FSW_TOP_MASK;
7364 uFsw |= uTop;
7365 pFpuCtx->FSW = uFsw;
7366}
7367
7368
7369/**
 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7371 *
7372 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7373 */
7374IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7375{
7376 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7377 uint16_t uFsw = pFpuCtx->FSW;
7378 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7379 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7380 uFsw &= ~X86_FSW_TOP_MASK;
7381 uFsw |= uTop;
7382 pFpuCtx->FSW = uFsw;
7383}
7384
7385
7386/**
7387 * Updates the FSW, FOP, FPUIP, and FPUCS.
7388 *
7389 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7390 * @param u16FSW The FSW from the current instruction.
7391 */
7392IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7393{
7394 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7395 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7396 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7397 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7398}
7399
7400
7401/**
7402 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7403 *
7404 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7405 * @param u16FSW The FSW from the current instruction.
7406 */
7407IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7408{
7409 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7410 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7411 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7412 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7413 iemFpuMaybePopOne(pFpuCtx);
7414}
7415
7416
7417/**
7418 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7419 *
7420 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7421 * @param u16FSW The FSW from the current instruction.
7422 * @param iEffSeg The effective memory operand selector register.
7423 * @param GCPtrEff The effective memory operand offset.
7424 */
7425IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7426{
7427 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7428 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7429 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7430 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7431 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7432}
7433
7434
7435/**
7436 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7437 *
7438 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7439 * @param u16FSW The FSW from the current instruction.
7440 */
7441IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7442{
7443 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7444 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7445 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7446 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7447 iemFpuMaybePopOne(pFpuCtx);
7448 iemFpuMaybePopOne(pFpuCtx);
7449}
7450
7451
7452/**
7453 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7454 *
7455 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7456 * @param u16FSW The FSW from the current instruction.
7457 * @param iEffSeg The effective memory operand selector register.
7458 * @param GCPtrEff The effective memory operand offset.
7459 */
7460IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7461{
7462 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7463 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7464 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7465 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7466 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7467 iemFpuMaybePopOne(pFpuCtx);
7468}
7469
7470
7471/**
7472 * Worker routine for raising an FPU stack underflow exception.
7473 *
7474 * @param pFpuCtx The FPU context.
7475 * @param iStReg The stack register being accessed.
7476 */
7477IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7478{
7479 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7480 if (pFpuCtx->FCW & X86_FCW_IM)
7481 {
7482 /* Masked underflow. */
7483 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7484 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7485 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7486 if (iStReg != UINT8_MAX)
7487 {
7488 pFpuCtx->FTW |= RT_BIT(iReg);
7489 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7490 }
7491 }
7492 else
7493 {
7494 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7495 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7496 }
7497}
7498
7499
7500/**
 * Raises an FPU stack underflow exception.
7502 *
7503 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iStReg The destination register that should be loaded
 * with QNaN if \#IS is masked. Specify
 * UINT8_MAX if none (like for fcom).
7507 */
7508DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7509{
7510 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7511 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7512 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7513 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7514}
7515
7516
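/**
 * Raises an FPU stack underflow exception, recording the memory operand in
 * FPUDP and FPUDS.
 *
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iStReg The destination register that should be loaded
 * with QNaN if \#IS is masked. Specify UINT8_MAX if none.
 * @param iEffSeg The effective memory operand selector register.
 * @param GCPtrEff The effective memory operand offset.
 */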
7517DECL_NO_INLINE(IEM_STATIC, void)
7518iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7519{
7520 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7521 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7522 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7523 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7524 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7525}
7526
7527
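/**
 * Raises an FPU stack underflow exception, then pops the stack once.
 *
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iStReg The destination register that should be loaded
 * with QNaN if \#IS is masked. Specify UINT8_MAX if none.
 */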
7528DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7529{
7530 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7531 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7532 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7533 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7534 iemFpuMaybePopOne(pFpuCtx);
7535}
7536
7537
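/**
 * Raises an FPU stack underflow exception, recording the memory operand in
 * FPUDP and FPUDS, then pops the stack once.
 *
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iStReg The destination register that should be loaded
 * with QNaN if \#IS is masked. Specify UINT8_MAX if none.
 * @param iEffSeg The effective memory operand selector register.
 * @param GCPtrEff The effective memory operand offset.
 */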
7538DECL_NO_INLINE(IEM_STATIC, void)
7539iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7540{
7541 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7542 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7543 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7544 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7545 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7546 iemFpuMaybePopOne(pFpuCtx);
7547}
7548
7549
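/**
 * Raises an FPU stack underflow exception without a destination register
 * (no QNaN is loaded), then pops the stack twice.
 *
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 */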
7550DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7551{
7552 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7553 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7554 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7555 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7556 iemFpuMaybePopOne(pFpuCtx);
7557 iemFpuMaybePopOne(pFpuCtx);
7558}
7559
7560
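/**
 * Raises an FPU stack underflow exception for an instruction that pushes a
 * result; a QNaN is pushed instead if \#IS is masked.
 *
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 */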
7561DECL_NO_INLINE(IEM_STATIC, void)
7562iemFpuStackPushUnderflow(PVMCPU pVCpu)
7563{
7564 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7565 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7566 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7567
7568 if (pFpuCtx->FCW & X86_FCW_IM)
7569 {
        /* Masked underflow - Push QNaN. */
7571 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7572 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7573 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7574 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7575 pFpuCtx->FTW |= RT_BIT(iNewTop);
7576 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7577 iemFpuRotateStackPush(pFpuCtx);
7578 }
7579 else
7580 {
7581 /* Exception pending - don't change TOP or the register stack. */
7582 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7583 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7584 }
7585}
7586
7587
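/**
 * Raises an FPU stack underflow exception for an instruction that replaces
 * ST0 and pushes a second result; QNaNs are stored in both positions if
 * \#IS is masked.
 *
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 */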
7588DECL_NO_INLINE(IEM_STATIC, void)
7589iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7590{
7591 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7592 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7593 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7594
7595 if (pFpuCtx->FCW & X86_FCW_IM)
7596 {
        /* Masked underflow - Push QNaN. */
7598 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7599 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7600 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7601 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7602 pFpuCtx->FTW |= RT_BIT(iNewTop);
7603 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7604 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7605 iemFpuRotateStackPush(pFpuCtx);
7606 }
7607 else
7608 {
7609 /* Exception pending - don't change TOP or the register stack. */
7610 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7611 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7612 }
7613}
7614
7615
7616/**
7617 * Worker routine for raising an FPU stack overflow exception on a push.
7618 *
7619 * @param pFpuCtx The FPU context.
7620 */
7621IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7622{
7623 if (pFpuCtx->FCW & X86_FCW_IM)
7624 {
7625 /* Masked overflow. */
7626 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7627 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7628 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7629 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7630 pFpuCtx->FTW |= RT_BIT(iNewTop);
7631 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7632 iemFpuRotateStackPush(pFpuCtx);
7633 }
7634 else
7635 {
7636 /* Exception pending - don't change TOP or the register stack. */
7637 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7638 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7639 }
7640}
7641
7642
7643/**
 * Raises an FPU stack overflow exception on a push.
7645 *
7646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7647 */
7648DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7649{
7650 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7651 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7652 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7653 iemFpuStackPushOverflowOnly(pFpuCtx);
7654}
7655
7656
7657/**
7658 * Raises a FPU stack overflow exception on a push with a memory operand.
7659 *
7660 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7661 * @param iEffSeg The effective memory operand selector register.
7662 * @param GCPtrEff The effective memory operand offset.
7663 */
7664DECL_NO_INLINE(IEM_STATIC, void)
7665iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7666{
7667 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7668 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7669 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7670 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7671 iemFpuStackPushOverflowOnly(pFpuCtx);
7672}
7673
7674
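/**
 * Checks whether the given FPU stack register is marked as non-empty in FTW.
 *
 * @returns VINF_SUCCESS if the register holds a value, VERR_NOT_FOUND if it
 * is empty.
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iStReg The stack register (relative to TOP) to check.
 */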
7675IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7676{
7677 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7678 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7679 if (pFpuCtx->FTW & RT_BIT(iReg))
7680 return VINF_SUCCESS;
7681 return VERR_NOT_FOUND;
7682}
7683
7684
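/**
 * Checks whether the given FPU stack register is non-empty and, if so,
 * returns a read-only reference to its 80-bit value.
 *
 * @returns VINF_SUCCESS and *ppRef set on success, VERR_NOT_FOUND if empty.
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iStReg The stack register (relative to TOP).
 * @param ppRef Where to return the register reference.
 */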
7685IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7686{
7687 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7688 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7689 if (pFpuCtx->FTW & RT_BIT(iReg))
7690 {
7691 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7692 return VINF_SUCCESS;
7693 }
7694 return VERR_NOT_FOUND;
7695}
7696
7697
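/**
 * Checks whether two FPU stack registers are both non-empty and, if so,
 * returns read-only references to their 80-bit values.
 *
 * @returns VINF_SUCCESS with both references set, VERR_NOT_FOUND if either
 * register is empty.
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iStReg0 The first stack register (relative to TOP).
 * @param ppRef0 Where to return the first register reference.
 * @param iStReg1 The second stack register (relative to TOP).
 * @param ppRef1 Where to return the second register reference.
 */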
7698IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7699 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7700{
7701 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7702 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7703 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7704 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7705 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7706 {
7707 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7708 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7709 return VINF_SUCCESS;
7710 }
7711 return VERR_NOT_FOUND;
7712}
7713
7714
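/**
 * Checks whether two FPU stack registers are both non-empty and, if so,
 * returns a read-only reference to the first one only.
 *
 * @returns VINF_SUCCESS with *ppRef0 set, VERR_NOT_FOUND if either register
 * is empty.
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iStReg0 The first stack register (relative to TOP).
 * @param ppRef0 Where to return the first register reference.
 * @param iStReg1 The second stack register (relative to TOP).
 */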
7715IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7716{
7717 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7718 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7719 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7720 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7721 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7722 {
7723 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7724 return VINF_SUCCESS;
7725 }
7726 return VERR_NOT_FOUND;
7727}
7728
7729
7730/**
7731 * Updates the FPU exception status after FCW is changed.
7732 *
7733 * @param pFpuCtx The FPU context.
7734 */
7735IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7736{
7737 uint16_t u16Fsw = pFpuCtx->FSW;
7738 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7739 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7740 else
7741 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7742 pFpuCtx->FSW = u16Fsw;
7743}
7744
7745
7746/**
7747 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7748 *
7749 * @returns The full FTW.
7750 * @param pFpuCtx The FPU context.
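 * @remarks The full FTW uses two bits per physical register: 00=valid,
 *          01=zero, 10=special (NaN, infinity, denormal, unnormal), 11=empty.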
7751 */
7752IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7753{
7754 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7755 uint16_t u16Ftw = 0;
7756 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7757 for (unsigned iSt = 0; iSt < 8; iSt++)
7758 {
7759 unsigned const iReg = (iSt + iTop) & 7;
7760 if (!(u8Ftw & RT_BIT(iReg)))
7761 u16Ftw |= 3 << (iReg * 2); /* empty */
7762 else
7763 {
7764 uint16_t uTag;
7765 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7766 if (pr80Reg->s.uExponent == 0x7fff)
7767 uTag = 2; /* Exponent is all 1's => Special. */
7768 else if (pr80Reg->s.uExponent == 0x0000)
7769 {
7770 if (pr80Reg->s.u64Mantissa == 0x0000)
7771 uTag = 1; /* All bits are zero => Zero. */
7772 else
7773 uTag = 2; /* Must be special. */
7774 }
7775 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7776 uTag = 0; /* Valid. */
7777 else
7778 uTag = 2; /* Must be special. */
7779
            u16Ftw |= uTag << (iReg * 2);
7781 }
7782 }
7783
7784 return u16Ftw;
7785}
7786
7787
7788/**
7789 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7790 *
7791 * @returns The compressed FTW.
7792 * @param u16FullFtw The full FTW to convert.
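 * @remarks The compressed form keeps one bit per register, set when the tag
 *          is anything but empty (11b). E.g. a full FTW of 0xFFFF (all empty)
 *          compresses to 0x00, while 0x0000 (all valid) becomes 0xFF.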
7793 */
7794IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7795{
7796 uint8_t u8Ftw = 0;
7797 for (unsigned i = 0; i < 8; i++)
7798 {
7799 if ((u16FullFtw & 3) != 3 /*empty*/)
7800 u8Ftw |= RT_BIT(i);
7801 u16FullFtw >>= 2;
7802 }
7803
7804 return u8Ftw;
7805}
7806
7807/** @} */
7808
7809
7810/** @name Memory access.
7811 *
7812 * @{
7813 */
7814
7815
7816/**
7817 * Updates the IEMCPU::cbWritten counter if applicable.
7818 *
7819 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7820 * @param fAccess The access being accounted for.
7821 * @param cbMem The access size.
7822 */
7823DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7824{
7825 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7826 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7827 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7828}
7829
7830
7831/**
 * Checks if the given segment can be written to, raising the appropriate
 * exception if not.
7834 *
7835 * @returns VBox strict status code.
7836 *
7837 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7838 * @param pHid Pointer to the hidden register.
7839 * @param iSegReg The register number.
7840 * @param pu64BaseAddr Where to return the base address to use for the
7841 * segment. (In 64-bit code it may differ from the
7842 * base in the hidden segment.)
7843 */
7844IEM_STATIC VBOXSTRICTRC
7845iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7846{
7847 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7848 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7849 else
7850 {
7851 if (!pHid->Attr.n.u1Present)
7852 {
7853 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7854 AssertRelease(uSel == 0);
7855 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7856 return iemRaiseGeneralProtectionFault0(pVCpu);
7857 }
7858
7859 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7860 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7861 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7862 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7863 *pu64BaseAddr = pHid->u64Base;
7864 }
7865 return VINF_SUCCESS;
7866}
7867
7868
7869/**
 * Checks if the given segment can be read from, raising the appropriate
 * exception if not.
7872 *
7873 * @returns VBox strict status code.
7874 *
7875 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7876 * @param pHid Pointer to the hidden register.
7877 * @param iSegReg The register number.
7878 * @param pu64BaseAddr Where to return the base address to use for the
7879 * segment. (In 64-bit code it may differ from the
7880 * base in the hidden segment.)
7881 */
7882IEM_STATIC VBOXSTRICTRC
7883iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7884{
7885 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7886 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7887 else
7888 {
7889 if (!pHid->Attr.n.u1Present)
7890 {
7891 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7892 AssertRelease(uSel == 0);
7893 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7894 return iemRaiseGeneralProtectionFault0(pVCpu);
7895 }
7896
7897 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7898 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7899 *pu64BaseAddr = pHid->u64Base;
7900 }
7901 return VINF_SUCCESS;
7902}
7903
7904
7905/**
7906 * Applies the segment limit, base and attributes.
7907 *
7908 * This may raise a \#GP or \#SS.
7909 *
7910 * @returns VBox strict status code.
7911 *
7912 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7913 * @param fAccess The kind of access which is being performed.
7914 * @param iSegReg The index of the segment register to apply.
7915 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7916 * TSS, ++).
7917 * @param cbMem The access size.
7918 * @param pGCPtrMem Pointer to the guest memory address to apply
7919 * segmentation to. Input and output parameter.
7920 */
7921IEM_STATIC VBOXSTRICTRC
7922iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7923{
7924 if (iSegReg == UINT8_MAX)
7925 return VINF_SUCCESS;
7926
7927 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7928 switch (pVCpu->iem.s.enmCpuMode)
7929 {
7930 case IEMMODE_16BIT:
7931 case IEMMODE_32BIT:
7932 {
7933 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7934 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7935
7936 if ( pSel->Attr.n.u1Present
7937 && !pSel->Attr.n.u1Unusable)
7938 {
7939 Assert(pSel->Attr.n.u1DescType);
7940 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7941 {
7942 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7943 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7944 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7945
7946 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7947 {
7948 /** @todo CPL check. */
7949 }
7950
7951 /*
7952 * There are two kinds of data selectors, normal and expand down.
7953 */
7954 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
7955 {
7956 if ( GCPtrFirst32 > pSel->u32Limit
7957 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7958 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7959 }
7960 else
7961 {
7962 /*
7963 * The upper boundary is defined by the B bit, not the G bit!
7964 */
7965 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
7966 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
7967 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7968 }
7969 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7970 }
7971 else
7972 {
7973
7974 /*
7975 * Code selectors can usually be used to read through; writing is
7976 * only permitted in real and V8086 mode.
7977 */
7978 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7979 || ( (fAccess & IEM_ACCESS_TYPE_READ)
7980 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
7981 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
7982 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7983
7984 if ( GCPtrFirst32 > pSel->u32Limit
7985 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7986 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7987
7988 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7989 {
7990 /** @todo CPL check. */
7991 }
7992
7993 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7994 }
7995 }
7996 else
7997 return iemRaiseGeneralProtectionFault0(pVCpu);
7998 return VINF_SUCCESS;
7999 }
8000
8001 case IEMMODE_64BIT:
8002 {
8003 RTGCPTR GCPtrMem = *pGCPtrMem;
8004 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8005 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8006
8007 Assert(cbMem >= 1);
8008 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8009 return VINF_SUCCESS;
8010 return iemRaiseGeneralProtectionFault0(pVCpu);
8011 }
8012
8013 default:
8014 AssertFailedReturn(VERR_IEM_IPE_7);
8015 }
8016}
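/*
 * Worked example for the 16/32-bit limit checks above (illustrative only,
 * numbers are assumptions):
 *  - Expand-up selector, u32Limit=0x00001fff: a 4 byte access at offset
 *    0x1ffd has its last byte at 0x2000 > limit and faults via
 *    iemRaiseSelectorBounds; the same access at offset 0x1ffc is fine.
 *  - Expand-down selector, u32Limit=0x00000fff, B=1: valid offsets are
 *    0x1000..0xffffffff, so an access starting at 0x0800 faults while one
 *    starting at 0x2000 passes the limit check.
 */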
8017
8018
8019/**
8020 * Translates a virtual address to a physical address and checks if we
8021 * can access the page as specified.
8022 *
8023 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8024 * @param GCPtrMem The virtual address.
8025 * @param fAccess The intended access.
8026 * @param pGCPhysMem Where to return the physical address.
8027 */
8028IEM_STATIC VBOXSTRICTRC
8029iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8030{
8031 /** @todo Need a different PGM interface here. We're currently using
8032 * generic / REM interfaces. this won't cut it for R0 & RC. */
8033 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8034 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8035 RTGCPHYS GCPhys;
8036 uint64_t fFlags;
8037 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8038 if (RT_FAILURE(rc))
8039 {
8040 /** @todo Check unassigned memory in unpaged mode. */
8041 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8042 *pGCPhysMem = NIL_RTGCPHYS;
8043 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8044 }
8045
8046 /* If the page is writable, user accessible and does not have the no-exec
8047 bit set, all access is allowed. Otherwise we'll have to check more carefully... */
8048 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8049 {
8050 /* Write to read only memory? */
8051 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8052 && !(fFlags & X86_PTE_RW)
8053 && ( (pVCpu->iem.s.uCpl == 3
8054 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8055 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
8056 {
8057 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8058 *pGCPhysMem = NIL_RTGCPHYS;
8059 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8060 }
8061
8062 /* Kernel memory accessed by userland? */
8063 if ( !(fFlags & X86_PTE_US)
8064 && pVCpu->iem.s.uCpl == 3
8065 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8066 {
8067 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8068 *pGCPhysMem = NIL_RTGCPHYS;
8069 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8070 }
8071
8072 /* Executing non-executable memory? */
8073 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8074 && (fFlags & X86_PTE_PAE_NX)
8075 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
8076 {
8077 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8078 *pGCPhysMem = NIL_RTGCPHYS;
8079 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8080 VERR_ACCESS_DENIED);
8081 }
8082 }
8083
8084 /*
8085 * Set the dirty / access flags.
8086 * ASSUMES this is set when the address is translated rather than on commit...
8087 */
8088 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8089 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8090 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8091 {
8092 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8093 AssertRC(rc2);
8094 }
8095
8096 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8097 *pGCPhysMem = GCPhys;
8098 return VINF_SUCCESS;
8099}
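/*
 * Quick reference for the checks above (illustrative summary, not exhaustive):
 *   - Page with RW=1, US=1, NX=0: the access goes through without further checks.
 *   - Write with RW=0: #PF if CPL==3 (non-system access) or CR0.WP is set.
 *   - User access (CPL==3, non-system) to a page with US=0: #PF.
 *   - Instruction fetch from a page with NX=1 while EFER.NXE=1: #PF.
 * On success the accessed/dirty bits are set and the page offset is merged
 * into *pGCPhysMem.
 */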
8100
8101
8102
8103/**
8104 * Maps a physical page.
8105 *
8106 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8108 * @param GCPhysMem The physical address.
8109 * @param fAccess The intended access.
8110 * @param ppvMem Where to return the mapping address.
8111 * @param pLock The PGM lock.
8112 */
8113IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8114{
8115#ifdef IEM_VERIFICATION_MODE_FULL
8116 /* Force the alternative path so we can ignore writes. */
8117 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
8118 {
8119 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8120 {
8121 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
8122 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8123 if (RT_FAILURE(rc2))
8124 pVCpu->iem.s.fProblematicMemory = true;
8125 }
8126 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8127 }
8128#endif
8129#ifdef IEM_LOG_MEMORY_WRITES
8130 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8131 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8132#endif
8133#ifdef IEM_VERIFICATION_MODE_MINIMAL
8134 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8135#endif
8136
8137 /** @todo This API may require some improving later. A private deal with PGM
8138 * regarding locking and unlocking needs to be struck. A couple of TLBs
8139 * living in PGM, but with publicly accessible inlined access methods
8140 * could perhaps be an even better solution. */
8141 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8142 GCPhysMem,
8143 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8144 pVCpu->iem.s.fBypassHandlers,
8145 ppvMem,
8146 pLock);
8147 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8148 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8149
8150#ifdef IEM_VERIFICATION_MODE_FULL
8151 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8152 pVCpu->iem.s.fProblematicMemory = true;
8153#endif
8154 return rc;
8155}
8156
8157
8158/**
8159 * Unmap a page previously mapped by iemMemPageMap.
8160 *
8161 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8162 * @param GCPhysMem The physical address.
8163 * @param fAccess The intended access.
8164 * @param pvMem What iemMemPageMap returned.
8165 * @param pLock The PGM lock.
8166 */
8167DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8168{
8169 NOREF(pVCpu);
8170 NOREF(GCPhysMem);
8171 NOREF(fAccess);
8172 NOREF(pvMem);
8173 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8174}
8175
8176
8177/**
8178 * Looks up a memory mapping entry.
8179 *
8180 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8182 * @param pvMem The memory address.
8183 * @param fAccess The kind of access to look for.
8184 */
8185DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8186{
8187 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8188 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8189 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8190 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8191 return 0;
8192 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8193 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8194 return 1;
8195 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8196 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8197 return 2;
8198 return VERR_NOT_FOUND;
8199}
8200
8201
8202/**
8203 * Finds a free memmap entry when using iNextMapping doesn't work.
8204 *
8205 * @returns Memory mapping index, 1024 on failure.
8206 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8207 */
8208IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8209{
8210 /*
8211 * The easy case.
8212 */
8213 if (pVCpu->iem.s.cActiveMappings == 0)
8214 {
8215 pVCpu->iem.s.iNextMapping = 1;
8216 return 0;
8217 }
8218
8219 /* There should be enough mappings for all instructions. */
8220 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8221
8222 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8223 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8224 return i;
8225
8226 AssertFailedReturn(1024);
8227}
8228
8229
8230/**
8231 * Commits a bounce buffer that needs writing back and unmaps it.
8232 *
8233 * @returns Strict VBox status code.
8234 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8235 * @param iMemMap The index of the buffer to commit.
8236 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8237 * Always false in ring-3, obviously.
8238 */
8239IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8240{
8241 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8242 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8243#ifdef IN_RING3
8244 Assert(!fPostponeFail);
8245 RT_NOREF_PV(fPostponeFail);
8246#endif
8247
8248 /*
8249 * Do the writing.
8250 */
8251#ifndef IEM_VERIFICATION_MODE_MINIMAL
8252 PVM pVM = pVCpu->CTX_SUFF(pVM);
8253 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
8254 && !IEM_VERIFICATION_ENABLED(pVCpu))
8255 {
8256 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8257 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8258 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8259 if (!pVCpu->iem.s.fBypassHandlers)
8260 {
8261 /*
8262 * Carefully and efficiently dealing with access handler return
8263 * codes makes this a little bloated.
8264 */
8265 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8266 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8267 pbBuf,
8268 cbFirst,
8269 PGMACCESSORIGIN_IEM);
8270 if (rcStrict == VINF_SUCCESS)
8271 {
8272 if (cbSecond)
8273 {
8274 rcStrict = PGMPhysWrite(pVM,
8275 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8276 pbBuf + cbFirst,
8277 cbSecond,
8278 PGMACCESSORIGIN_IEM);
8279 if (rcStrict == VINF_SUCCESS)
8280 { /* nothing */ }
8281 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8282 {
8283 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8284 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8285 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8286 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8287 }
8288# ifndef IN_RING3
8289 else if (fPostponeFail)
8290 {
8291 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8292 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8293 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8294 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8295 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8296 return iemSetPassUpStatus(pVCpu, rcStrict);
8297 }
8298# endif
8299 else
8300 {
8301 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8302 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8303 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8304 return rcStrict;
8305 }
8306 }
8307 }
8308 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8309 {
8310 if (!cbSecond)
8311 {
8312 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8313 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8314 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8315 }
8316 else
8317 {
8318 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8319 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8320 pbBuf + cbFirst,
8321 cbSecond,
8322 PGMACCESSORIGIN_IEM);
8323 if (rcStrict2 == VINF_SUCCESS)
8324 {
8325 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8326 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8327 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8328 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8329 }
8330 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8331 {
8332 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8333 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8334 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8335 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8336 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8337 }
8338# ifndef IN_RING3
8339 else if (fPostponeFail)
8340 {
8341 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8342 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8343 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8344 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8345 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8346 return iemSetPassUpStatus(pVCpu, rcStrict);
8347 }
8348# endif
8349 else
8350 {
8351 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8352 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8353 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8354 return rcStrict2;
8355 }
8356 }
8357 }
8358# ifndef IN_RING3
8359 else if (fPostponeFail)
8360 {
8361 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8362 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8363 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8364 if (!cbSecond)
8365 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8366 else
8367 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8368 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8369 return iemSetPassUpStatus(pVCpu, rcStrict);
8370 }
8371# endif
8372 else
8373 {
8374 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8375 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8376 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8377 return rcStrict;
8378 }
8379 }
8380 else
8381 {
8382 /*
8383 * No access handlers, much simpler.
8384 */
8385 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8386 if (RT_SUCCESS(rc))
8387 {
8388 if (cbSecond)
8389 {
8390 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8391 if (RT_SUCCESS(rc))
8392 { /* likely */ }
8393 else
8394 {
8395 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8396 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8397 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8398 return rc;
8399 }
8400 }
8401 }
8402 else
8403 {
8404 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8405 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8406 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8407 return rc;
8408 }
8409 }
8410 }
8411#endif
8412
8413#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8414 /*
8415 * Record the write(s).
8416 */
8417 if (!pVCpu->iem.s.fNoRem)
8418 {
8419 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8420 if (pEvtRec)
8421 {
8422 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8423 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
8424 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8425 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
8426 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
8427 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8428 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8429 }
8430 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8431 {
8432 pEvtRec = iemVerifyAllocRecord(pVCpu);
8433 if (pEvtRec)
8434 {
8435 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8436 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
8437 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8438 memcpy(pEvtRec->u.RamWrite.ab,
8439 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
8440 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
8441 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8442 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8443 }
8444 }
8445 }
8446#endif
8447#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
8448 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8449 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8450 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8451 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8452 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8453 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8454
8455 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8456 g_cbIemWrote = cbWrote;
8457 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8458#endif
8459
8460 /*
8461 * Free the mapping entry.
8462 */
8463 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8464 Assert(pVCpu->iem.s.cActiveMappings != 0);
8465 pVCpu->iem.s.cActiveMappings--;
8466 return VINF_SUCCESS;
8467}
8468
8469
8470/**
8471 * iemMemMap worker that deals with a request crossing pages.
8472 */
8473IEM_STATIC VBOXSTRICTRC
8474iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8475{
8476 /*
8477 * Do the address translations.
8478 */
8479 RTGCPHYS GCPhysFirst;
8480 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8481 if (rcStrict != VINF_SUCCESS)
8482 return rcStrict;
8483
8484 RTGCPHYS GCPhysSecond;
8485 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8486 fAccess, &GCPhysSecond);
8487 if (rcStrict != VINF_SUCCESS)
8488 return rcStrict;
8489 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8490
8491 PVM pVM = pVCpu->CTX_SUFF(pVM);
8492#ifdef IEM_VERIFICATION_MODE_FULL
8493 /*
8494 * Detect problematic memory when verifying so we can select
8495 * the right execution engine. (TLB: Redo this.)
8496 */
8497 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8498 {
8499 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8500 if (RT_SUCCESS(rc2))
8501 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8502 if (RT_FAILURE(rc2))
8503 pVCpu->iem.s.fProblematicMemory = true;
8504 }
8505#endif
8506
8507
8508 /*
8509 * Read in the current memory content if it's a read, execute or partial
8510 * write access.
8511 */
8512 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8513 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8514 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8515
8516 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8517 {
8518 if (!pVCpu->iem.s.fBypassHandlers)
8519 {
8520 /*
8521 * Must carefully deal with access handler status codes here,
8522 * which makes the code a bit bloated.
8523 */
8524 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8525 if (rcStrict == VINF_SUCCESS)
8526 {
8527 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8528 if (rcStrict == VINF_SUCCESS)
8529 { /*likely */ }
8530 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8531 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8532 else
8533 {
8534 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8535 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8536 return rcStrict;
8537 }
8538 }
8539 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8540 {
8541 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8542 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8543 {
8544 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8545 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8546 }
8547 else
8548 {
8549 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8550 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8551 return rcStrict2;
8552 }
8553 }
8554 else
8555 {
8556 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8557 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8558 return rcStrict;
8559 }
8560 }
8561 else
8562 {
8563 /*
8564 * No informational status codes here, much more straightforward.
8565 */
8566 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8567 if (RT_SUCCESS(rc))
8568 {
8569 Assert(rc == VINF_SUCCESS);
8570 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8571 if (RT_SUCCESS(rc))
8572 Assert(rc == VINF_SUCCESS);
8573 else
8574 {
8575 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8576 return rc;
8577 }
8578 }
8579 else
8580 {
8581 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8582 return rc;
8583 }
8584 }
8585
8586#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8587 if ( !pVCpu->iem.s.fNoRem
8588 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8589 {
8590 /*
8591 * Record the reads.
8592 */
8593 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8594 if (pEvtRec)
8595 {
8596 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8597 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8598 pEvtRec->u.RamRead.cb = cbFirstPage;
8599 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8600 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8601 }
8602 pEvtRec = iemVerifyAllocRecord(pVCpu);
8603 if (pEvtRec)
8604 {
8605 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8606 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8607 pEvtRec->u.RamRead.cb = cbSecondPage;
8608 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8609 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8610 }
8611 }
8612#endif
8613 }
8614#ifdef VBOX_STRICT
8615 else
8616 memset(pbBuf, 0xcc, cbMem);
8617 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8618 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8619#endif
8620
8621 /*
8622 * Commit the bounce buffer entry.
8623 */
8624 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8625 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8626 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8627 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8628 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8629 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8630 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8631 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8632 pVCpu->iem.s.cActiveMappings++;
8633
8634 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8635 *ppvMem = pbBuf;
8636 return VINF_SUCCESS;
8637}
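/*
 * Worked example (illustrative): a 4 byte read at an address with page
 * offset 0xffe gives cbFirstPage = PAGE_SIZE - 0xffe = 2 and
 * cbSecondPage = 2.  The first two bytes are read from GCPhysFirst, the
 * remaining two from the start of the following page (GCPhysSecond), and
 * the caller gets a pointer into the bounce buffer holding all 4 bytes.
 */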
8638
8639
8640/**
8641 * iemMemMap worker that deals with iemMemPageMap failures.
8642 */
8643IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8644 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8645{
8646 /*
8647 * Filter out conditions we can handle and the ones which shouldn't happen.
8648 */
8649 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8650 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8651 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8652 {
8653 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8654 return rcMap;
8655 }
8656 pVCpu->iem.s.cPotentialExits++;
8657
8658 /*
8659 * Read in the current memory content if it's a read, execute or partial
8660 * write access.
8661 */
8662 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8663 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8664 {
8665 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8666 memset(pbBuf, 0xff, cbMem);
8667 else
8668 {
8669 int rc;
8670 if (!pVCpu->iem.s.fBypassHandlers)
8671 {
8672 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8673 if (rcStrict == VINF_SUCCESS)
8674 { /* nothing */ }
8675 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8676 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8677 else
8678 {
8679 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8680 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8681 return rcStrict;
8682 }
8683 }
8684 else
8685 {
8686 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8687 if (RT_SUCCESS(rc))
8688 { /* likely */ }
8689 else
8690 {
8691 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8692 GCPhysFirst, rc));
8693 return rc;
8694 }
8695 }
8696 }
8697
8698#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8699 if ( !pVCpu->iem.s.fNoRem
8700 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8701 {
8702 /*
8703 * Record the read.
8704 */
8705 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8706 if (pEvtRec)
8707 {
8708 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8709 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8710 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8711 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8712 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8713 }
8714 }
8715#endif
8716 }
8717#ifdef VBOX_STRICT
8718 else
8719 memset(pbBuf, 0xcc, cbMem);
8720#endif
8721#ifdef VBOX_STRICT
8722 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8723 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8724#endif
8725
8726 /*
8727 * Commit the bounce buffer entry.
8728 */
8729 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8730 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8731 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8732 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8733 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8734 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8735 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8736 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8737 pVCpu->iem.s.cActiveMappings++;
8738
8739 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8740 *ppvMem = pbBuf;
8741 return VINF_SUCCESS;
8742}
8743
8744
8745
8746/**
8747 * Maps the specified guest memory for the given kind of access.
8748 *
8749 * This may be using bounce buffering of the memory if it's crossing a page
8750 * boundary or if there is an access handler installed for any of it. Because
8751 * of lock prefix guarantees, we're in for some extra clutter when this
8752 * happens.
8753 *
8754 * This may raise a \#GP, \#SS, \#PF or \#AC.
8755 *
8756 * @returns VBox strict status code.
8757 *
8758 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8759 * @param ppvMem Where to return the pointer to the mapped
8760 * memory.
8761 * @param cbMem The number of bytes to map. This is usually 1,
8762 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8763 * string operations it can be up to a page.
8764 * @param iSegReg The index of the segment register to use for
8765 * this access. The base and limits are checked.
8766 * Use UINT8_MAX to indicate that no segmentation
8767 * is required (for IDT, GDT and LDT accesses).
8768 * @param GCPtrMem The address of the guest memory.
8769 * @param fAccess How the memory is being accessed. The
8770 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8771 * how to map the memory, while the
8772 * IEM_ACCESS_WHAT_XXX bit is used when raising
8773 * exceptions.
8774 */
8775IEM_STATIC VBOXSTRICTRC
8776iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8777{
8778 /*
8779 * Check the input and figure out which mapping entry to use.
8780 */
8781 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8782 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8783 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8784
8785 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8786 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8787 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8788 {
8789 iMemMap = iemMemMapFindFree(pVCpu);
8790 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8791 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8792 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8793 pVCpu->iem.s.aMemMappings[2].fAccess),
8794 VERR_IEM_IPE_9);
8795 }
8796
8797 /*
8798 * Map the memory, checking that we can actually access it. If something
8799 * slightly complicated happens, fall back on bounce buffering.
8800 */
8801 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8802 if (rcStrict != VINF_SUCCESS)
8803 return rcStrict;
8804
8805 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8806 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8807
8808 RTGCPHYS GCPhysFirst;
8809 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8810 if (rcStrict != VINF_SUCCESS)
8811 return rcStrict;
8812
8813 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8814 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8815 if (fAccess & IEM_ACCESS_TYPE_READ)
8816 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8817
8818 void *pvMem;
8819 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8820 if (rcStrict != VINF_SUCCESS)
8821 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8822
8823 /*
8824 * Fill in the mapping table entry.
8825 */
8826 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8827 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8828 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8829 pVCpu->iem.s.cActiveMappings++;
8830
8831 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8832 *ppvMem = pvMem;
8833 return VINF_SUCCESS;
8834}
8835
8836
8837/**
8838 * Commits the guest memory if bounce buffered and unmaps it.
8839 *
8840 * @returns Strict VBox status code.
8841 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8842 * @param pvMem The mapping.
8843 * @param fAccess The kind of access.
8844 */
8845IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8846{
8847 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8848 AssertReturn(iMemMap >= 0, iMemMap);
8849
8850 /* If it's bounce buffered, we may need to write back the buffer. */
8851 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8852 {
8853 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8854 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8855 }
8856 /* Otherwise unlock it. */
8857 else
8858 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8859
8860 /* Free the entry. */
8861 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8862 Assert(pVCpu->iem.s.cActiveMappings != 0);
8863 pVCpu->iem.s.cActiveMappings--;
8864 return VINF_SUCCESS;
8865}
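/*
 * Usage sketch (hypothetical helper, for illustration only): the typical
 * map / modify / commit-and-unmap sequence for a write access, mirroring
 * the read helpers further down.  The name iemMemExampleStoreU8 is an
 * assumption and not part of the real API.
 */
#if 0 /* example only */
IEM_STATIC VBOXSTRICTRC iemMemExampleStoreU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
{
    uint8_t *pu8Dst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    if (rc == VINF_SUCCESS)
    {
        *pu8Dst = u8Value;                                            /* write into the direct mapping or bounce buffer */
        rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);  /* commits bounce buffers, releases page locks */
    }
    return rc;
}
#endif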
8866
8867#ifdef IEM_WITH_SETJMP
8868
8869/**
8870 * Maps the specified guest memory for the given kind of access, longjmp on
8871 * error.
8872 *
8873 * This may be using bounce buffering of the memory if it's crossing a page
8874 * boundary or if there is an access handler installed for any of it. Because
8875 * of lock prefix guarantees, we're in for some extra clutter when this
8876 * happens.
8877 *
8878 * This may raise a \#GP, \#SS, \#PF or \#AC.
8879 *
8880 * @returns Pointer to the mapped memory.
8881 *
8882 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8883 * @param cbMem The number of bytes to map. This is usually 1,
8884 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8885 * string operations it can be up to a page.
8886 * @param iSegReg The index of the segment register to use for
8887 * this access. The base and limits are checked.
8888 * Use UINT8_MAX to indicate that no segmentation
8889 * is required (for IDT, GDT and LDT accesses).
8890 * @param GCPtrMem The address of the guest memory.
8891 * @param fAccess How the memory is being accessed. The
8892 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8893 * how to map the memory, while the
8894 * IEM_ACCESS_WHAT_XXX bit is used when raising
8895 * exceptions.
8896 */
8897IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8898{
8899 /*
8900 * Check the input and figure out which mapping entry to use.
8901 */
8902 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8903 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8904 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8905
8906 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8907 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8908 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8909 {
8910 iMemMap = iemMemMapFindFree(pVCpu);
8911 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8912 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8913 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8914 pVCpu->iem.s.aMemMappings[2].fAccess),
8915 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8916 }
8917
8918 /*
8919 * Map the memory, checking that we can actually access it. If something
8920 * slightly complicated happens, fall back on bounce buffering.
8921 */
8922 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8923 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8924 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8925
8926 /* Crossing a page boundary? */
8927 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8928 { /* No (likely). */ }
8929 else
8930 {
8931 void *pvMem;
8932 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8933 if (rcStrict == VINF_SUCCESS)
8934 return pvMem;
8935 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8936 }
8937
8938 RTGCPHYS GCPhysFirst;
8939 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8940 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8941 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8942
8943 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8944 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8945 if (fAccess & IEM_ACCESS_TYPE_READ)
8946 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8947
8948 void *pvMem;
8949 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8950 if (rcStrict == VINF_SUCCESS)
8951 { /* likely */ }
8952 else
8953 {
8954 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8955 if (rcStrict == VINF_SUCCESS)
8956 return pvMem;
8957 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8958 }
8959
8960 /*
8961 * Fill in the mapping table entry.
8962 */
8963 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8964 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8965 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8966 pVCpu->iem.s.cActiveMappings++;
8967
8968 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8969 return pvMem;
8970}
8971
8972
8973/**
8974 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8975 *
8976 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8977 * @param pvMem The mapping.
8978 * @param fAccess The kind of access.
8979 */
8980IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8981{
8982 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8983 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8984
8985 /* If it's bounce buffered, we may need to write back the buffer. */
8986 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8987 {
8988 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8989 {
8990 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8991 if (rcStrict == VINF_SUCCESS)
8992 return;
8993 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8994 }
8995 }
8996 /* Otherwise unlock it. */
8997 else
8998 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8999
9000 /* Free the entry. */
9001 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9002 Assert(pVCpu->iem.s.cActiveMappings != 0);
9003 pVCpu->iem.s.cActiveMappings--;
9004}
9005
9006#endif
9007
9008#ifndef IN_RING3
9009/**
9010 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9011 * buffer part shows trouble, the write is postponed to ring-3 (sets FF and stuff).
9012 *
9013 * Allows the instruction to be completed and retired, while the IEM user will
9014 * return to ring-3 immediately afterwards and do the postponed writes there.
9015 *
9016 * @returns VBox status code (no strict statuses). Caller must check
9017 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9018 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9019 * @param pvMem The mapping.
9020 * @param fAccess The kind of access.
9021 */
9022IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9023{
9024 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9025 AssertReturn(iMemMap >= 0, iMemMap);
9026
9027 /* If it's bounce buffered, we may need to write back the buffer. */
9028 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9029 {
9030 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9031 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9032 }
9033 /* Otherwise unlock it. */
9034 else
9035 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9036
9037 /* Free the entry. */
9038 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9039 Assert(pVCpu->iem.s.cActiveMappings != 0);
9040 pVCpu->iem.s.cActiveMappings--;
9041 return VINF_SUCCESS;
9042}
9043#endif
9044
9045
9046/**
9047 * Rolls back mappings, releasing page locks and such.
9048 *
9049 * The caller shall only call this after checking cActiveMappings.
9050 *
9052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9053 */
9054IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9055{
9056 Assert(pVCpu->iem.s.cActiveMappings > 0);
9057
9058 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9059 while (iMemMap-- > 0)
9060 {
9061 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9062 if (fAccess != IEM_ACCESS_INVALID)
9063 {
9064 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9065 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9066 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9067 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9068 Assert(pVCpu->iem.s.cActiveMappings > 0);
9069 pVCpu->iem.s.cActiveMappings--;
9070 }
9071 }
9072}
9073
9074
9075/**
9076 * Fetches a data byte.
9077 *
9078 * @returns Strict VBox status code.
9079 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9080 * @param pu8Dst Where to return the byte.
9081 * @param iSegReg The index of the segment register to use for
9082 * this access. The base and limits are checked.
9083 * @param GCPtrMem The address of the guest memory.
9084 */
9085IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9086{
9087 /* The lazy approach for now... */
9088 uint8_t const *pu8Src;
9089 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9090 if (rc == VINF_SUCCESS)
9091 {
9092 *pu8Dst = *pu8Src;
9093 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9094 }
9095 return rc;
9096}
9097
9098
9099#ifdef IEM_WITH_SETJMP
9100/**
9101 * Fetches a data byte, longjmp on error.
9102 *
9103 * @returns The byte.
9104 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9105 * @param iSegReg The index of the segment register to use for
9106 * this access. The base and limits are checked.
9107 * @param GCPtrMem The address of the guest memory.
9108 */
9109DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9110{
9111 /* The lazy approach for now... */
9112 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9113 uint8_t const bRet = *pu8Src;
9114 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9115 return bRet;
9116}
9117#endif /* IEM_WITH_SETJMP */
9118
9119
9120/**
9121 * Fetches a data word.
9122 *
9123 * @returns Strict VBox status code.
9124 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9125 * @param pu16Dst Where to return the word.
9126 * @param iSegReg The index of the segment register to use for
9127 * this access. The base and limits are checked.
9128 * @param GCPtrMem The address of the guest memory.
9129 */
9130IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9131{
9132 /* The lazy approach for now... */
9133 uint16_t const *pu16Src;
9134 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9135 if (rc == VINF_SUCCESS)
9136 {
9137 *pu16Dst = *pu16Src;
9138 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9139 }
9140 return rc;
9141}
9142
9143
9144#ifdef IEM_WITH_SETJMP
9145/**
9146 * Fetches a data word, longjmp on error.
9147 *
9148 * @returns The word
9149 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9150 * @param iSegReg The index of the segment register to use for
9151 * this access. The base and limits are checked.
9152 * @param GCPtrMem The address of the guest memory.
9153 */
9154DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9155{
9156 /* The lazy approach for now... */
9157 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9158 uint16_t const u16Ret = *pu16Src;
9159 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9160 return u16Ret;
9161}
9162#endif
9163
9164
9165/**
9166 * Fetches a data dword.
9167 *
9168 * @returns Strict VBox status code.
9169 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9170 * @param pu32Dst Where to return the dword.
9171 * @param iSegReg The index of the segment register to use for
9172 * this access. The base and limits are checked.
9173 * @param GCPtrMem The address of the guest memory.
9174 */
9175IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9176{
9177 /* The lazy approach for now... */
9178 uint32_t const *pu32Src;
9179 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9180 if (rc == VINF_SUCCESS)
9181 {
9182 *pu32Dst = *pu32Src;
9183 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9184 }
9185 return rc;
9186}
9187
9188
9189#ifdef IEM_WITH_SETJMP
9190
9191IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9192{
9193 Assert(cbMem >= 1);
9194 Assert(iSegReg < X86_SREG_COUNT);
9195
9196 /*
9197 * 64-bit mode is simpler.
9198 */
9199 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9200 {
9201 if (iSegReg >= X86_SREG_FS)
9202 {
9203 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9204 GCPtrMem += pSel->u64Base;
9205 }
9206
9207 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9208 return GCPtrMem;
9209 }
9210 /*
9211 * 16-bit and 32-bit segmentation.
9212 */
9213 else
9214 {
9215 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9216 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9217 == X86DESCATTR_P /* data, expand up */
9218 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9219 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9220 {
9221 /* expand up */
9222             uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
9223             if (RT_LIKELY(   GCPtrLast32 <= pSel->u32Limit
9224                           && GCPtrLast32 >= (uint32_t)GCPtrMem))
9225 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9226 }
9227 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9228 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9229 {
9230 /* expand down */
9231 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9232 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9233 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9234 && GCPtrLast32 > (uint32_t)GCPtrMem))
9235 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9236 }
9237 else
9238 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9239 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9240 }
9241 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9242}
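/*
 * Note on the attribute tests above (illustrative): each compare folds the
 * present, unusable, code/data and direction/read bits into one mask so a
 * single branch accepts the common cases.  E.g. a present, usable expand-up
 * read/write data selector matches the first compare and only the limit
 * check remains, while an execute-only code selector matches neither compare
 * and ends up in iemRaiseSelectorInvalidAccessJmp.  The write variant below
 * uses the same trick with the write bit instead of the read bit.
 */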
9243
9244
9245IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9246{
9247 Assert(cbMem >= 1);
9248 Assert(iSegReg < X86_SREG_COUNT);
9249
9250 /*
9251 * 64-bit mode is simpler.
9252 */
9253 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9254 {
9255 if (iSegReg >= X86_SREG_FS)
9256 {
9257 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9258 GCPtrMem += pSel->u64Base;
9259 }
9260
9261 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9262 return GCPtrMem;
9263 }
9264 /*
9265 * 16-bit and 32-bit segmentation.
9266 */
9267 else
9268 {
9269 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9270 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9271 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9272 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9273 {
9274 /* expand up */
9275             uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
9276             if (RT_LIKELY(   GCPtrLast32 <= pSel->u32Limit
9277                           && GCPtrLast32 >= (uint32_t)GCPtrMem))
9278 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9279 }
9280 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9281 {
9282 /* expand down */
9283 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9284 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9285 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9286 && GCPtrLast32 > (uint32_t)GCPtrMem))
9287 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9288 }
9289 else
9290 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9291 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9292 }
9293 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9294}
9295
9296
9297/**
9298 * Fetches a data dword, longjmp on error, fallback/safe version.
9299 *
9300 * @returns The dword
9301 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9302 * @param iSegReg The index of the segment register to use for
9303 * this access. The base and limits are checked.
9304 * @param GCPtrMem The address of the guest memory.
9305 */
9306IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9307{
9308 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9309 uint32_t const u32Ret = *pu32Src;
9310 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9311 return u32Ret;
9312}
9313
9314
9315/**
9316 * Fetches a data dword, longjmp on error.
9317 *
9318 * @returns The dword
9319 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9320 * @param iSegReg The index of the segment register to use for
9321 * this access. The base and limits are checked.
9322 * @param GCPtrMem The address of the guest memory.
9323 */
9324DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9325{
9326# ifdef IEM_WITH_DATA_TLB
9327 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9328 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9329 {
9330 /// @todo more later.
9331 }
9332
9333 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9334# else
9335 /* The lazy approach. */
9336 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9337 uint32_t const u32Ret = *pu32Src;
9338 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9339 return u32Ret;
9340# endif
9341}
9342#endif
9343
9344
9345#ifdef SOME_UNUSED_FUNCTION
9346/**
9347 * Fetches a data dword and sign extends it to a qword.
9348 *
9349 * @returns Strict VBox status code.
9350 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9351 * @param pu64Dst Where to return the sign extended value.
9352 * @param iSegReg The index of the segment register to use for
9353 * this access. The base and limits are checked.
9354 * @param GCPtrMem The address of the guest memory.
9355 */
9356IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9357{
9358 /* The lazy approach for now... */
9359 int32_t const *pi32Src;
9360 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9361 if (rc == VINF_SUCCESS)
9362 {
9363 *pu64Dst = *pi32Src;
9364 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9365 }
9366#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9367 else
9368 *pu64Dst = 0;
9369#endif
9370 return rc;
9371}
9372#endif
9373
9374
9375/**
9376 * Fetches a data qword.
9377 *
9378 * @returns Strict VBox status code.
9379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9380 * @param pu64Dst Where to return the qword.
9381 * @param iSegReg The index of the segment register to use for
9382 * this access. The base and limits are checked.
9383 * @param GCPtrMem The address of the guest memory.
9384 */
9385IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9386{
9387 /* The lazy approach for now... */
9388 uint64_t const *pu64Src;
9389 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9390 if (rc == VINF_SUCCESS)
9391 {
9392 *pu64Dst = *pu64Src;
9393 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9394 }
9395 return rc;
9396}
9397
9398
9399#ifdef IEM_WITH_SETJMP
9400/**
9401 * Fetches a data qword, longjmp on error.
9402 *
9403 * @returns The qword.
9404 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9405 * @param iSegReg The index of the segment register to use for
9406 * this access. The base and limits are checked.
9407 * @param GCPtrMem The address of the guest memory.
9408 */
9409DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9410{
9411 /* The lazy approach for now... */
9412 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9413 uint64_t const u64Ret = *pu64Src;
9414 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9415 return u64Ret;
9416}
9417#endif
9418
9419
9420/**
9421 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9422 *
9423 * @returns Strict VBox status code.
9424 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9425 * @param pu64Dst Where to return the qword.
9426 * @param iSegReg The index of the segment register to use for
9427 * this access. The base and limits are checked.
9428 * @param GCPtrMem The address of the guest memory.
9429 */
9430IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9431{
9432 /* The lazy approach for now... */
9433 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9434 if (RT_UNLIKELY(GCPtrMem & 15))
9435 return iemRaiseGeneralProtectionFault0(pVCpu);
9436
9437 uint64_t const *pu64Src;
9438 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9439 if (rc == VINF_SUCCESS)
9440 {
9441 *pu64Dst = *pu64Src;
9442 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9443 }
9444 return rc;
9445}
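
/*
 * Usage sketch (hypothetical caller, not taken from this file): an SSE
 * instruction worker fetching a qword operand through the aligned variant.
 * Note that the address is checked against a 16 byte boundary even though
 * only 8 bytes are read, hence the 'AlignedU128' in the name; a misaligned
 * address yields \#GP(0) through the returned strict status code. The names
 * iEffSeg and GCPtrEffSrc stand for the decoded effective segment and
 * address and are placeholders.
 *
 *      uint64_t uSrc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU64AlignedU128(pVCpu, &uSrc, iEffSeg, GCPtrEffSrc);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */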
9446
9447
9448#ifdef IEM_WITH_SETJMP
9449/**
9450 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9451 *
9452 * @returns The qword.
9453 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9454 * @param iSegReg The index of the segment register to use for
9455 * this access. The base and limits are checked.
9456 * @param GCPtrMem The address of the guest memory.
9457 */
9458DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9459{
9460 /* The lazy approach for now... */
9461 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9462 if (RT_LIKELY(!(GCPtrMem & 15)))
9463 {
9464 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9465 uint64_t const u64Ret = *pu64Src;
9466 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9467 return u64Ret;
9468 }
9469
9470 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9471 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9472}
9473#endif
9474
9475
9476/**
9477 * Fetches a data tword.
9478 *
9479 * @returns Strict VBox status code.
9480 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9481 * @param pr80Dst Where to return the tword.
9482 * @param iSegReg The index of the segment register to use for
9483 * this access. The base and limits are checked.
9484 * @param GCPtrMem The address of the guest memory.
9485 */
9486IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9487{
9488 /* The lazy approach for now... */
9489 PCRTFLOAT80U pr80Src;
9490 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9491 if (rc == VINF_SUCCESS)
9492 {
9493 *pr80Dst = *pr80Src;
9494 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9495 }
9496 return rc;
9497}
9498
9499
9500#ifdef IEM_WITH_SETJMP
9501/**
9502 * Fetches a data tword, longjmp on error.
9503 *
9504 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9505 * @param pr80Dst Where to return the tword.
9506 * @param iSegReg The index of the segment register to use for
9507 * this access. The base and limits are checked.
9508 * @param GCPtrMem The address of the guest memory.
9509 */
9510DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9511{
9512 /* The lazy approach for now... */
9513 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9514 *pr80Dst = *pr80Src;
9515 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9516}
9517#endif
9518
9519
9520/**
9521 * Fetches a data dqword (double qword), generally SSE related.
9522 *
9523 * @returns Strict VBox status code.
9524 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9525 * @param pu128Dst Where to return the dqword.
9526 * @param iSegReg The index of the segment register to use for
9527 * this access. The base and limits are checked.
9528 * @param GCPtrMem The address of the guest memory.
9529 */
9530IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9531{
9532 /* The lazy approach for now... */
9533 PCRTUINT128U pu128Src;
9534 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9535 if (rc == VINF_SUCCESS)
9536 {
9537 pu128Dst->au64[0] = pu128Src->au64[0];
9538 pu128Dst->au64[1] = pu128Src->au64[1];
9539 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9540 }
9541 return rc;
9542}
9543
9544
9545#ifdef IEM_WITH_SETJMP
9546/**
9547 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9548 *
9549 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9550 * @param pu128Dst Where to return the dqword.
9551 * @param iSegReg The index of the segment register to use for
9552 * this access. The base and limits are checked.
9553 * @param GCPtrMem The address of the guest memory.
9554 */
9555IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9556{
9557 /* The lazy approach for now... */
9558 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9559 pu128Dst->au64[0] = pu128Src->au64[0];
9560 pu128Dst->au64[1] = pu128Src->au64[1];
9561 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9562}
9563#endif
9564
9565
9566/**
9567 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9568 * related.
9569 *
9570 * Raises \#GP(0) if not aligned.
9571 *
9572 * @returns Strict VBox status code.
9573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9574 * @param pu128Dst Where to return the dqword.
9575 * @param iSegReg The index of the segment register to use for
9576 * this access. The base and limits are checked.
9577 * @param GCPtrMem The address of the guest memory.
9578 */
9579IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9580{
9581 /* The lazy approach for now... */
9582 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9583 if ( (GCPtrMem & 15)
9584 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9585 return iemRaiseGeneralProtectionFault0(pVCpu);
9586
9587 PCRTUINT128U pu128Src;
9588 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9589 if (rc == VINF_SUCCESS)
9590 {
9591 pu128Dst->au64[0] = pu128Src->au64[0];
9592 pu128Dst->au64[1] = pu128Src->au64[1];
9593 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9594 }
9595 return rc;
9596}
9597
9598
9599#ifdef IEM_WITH_SETJMP
9600/**
9601 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9602 * related, longjmp on error.
9603 *
9604 * Raises \#GP(0) if not aligned.
9605 *
9606 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9607 * @param pu128Dst Where to return the dqword.
9608 * @param iSegReg The index of the segment register to use for
9609 * this access. The base and limits are checked.
9610 * @param GCPtrMem The address of the guest memory.
9611 */
9612DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9613{
9614 /* The lazy approach for now... */
9615 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9616 if ( (GCPtrMem & 15) == 0
9617 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9618 {
9619 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9620 pu128Dst->au64[0] = pu128Src->au64[0];
9621 pu128Dst->au64[1] = pu128Src->au64[1];
9622 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9623 return;
9624 }
9625
9626 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9627 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9628}
9629#endif
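
/*
 * Note on the MXCSR.MM escape used by the two functions above: a misaligned
 * 16 byte access only raises \#GP(0) when MXCSR.MM (AMD's misaligned SSE
 * mask) is clear; with the bit set the access is mapped like any other. A
 * hypothetical standalone predicate capturing the same condition:
 *
 *      bool const fRaiseGp0 = (GCPtrMem & 15)
 *                          && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM);
 */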
9630
9631
9632/**
9633 * Fetches a data oword (octo word), generally AVX related.
9634 *
9635 * @returns Strict VBox status code.
9636 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9637 * @param pu256Dst Where to return the oword.
9638 * @param iSegReg The index of the segment register to use for
9639 * this access. The base and limits are checked.
9640 * @param GCPtrMem The address of the guest memory.
9641 */
9642IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9643{
9644 /* The lazy approach for now... */
9645 PCRTUINT256U pu256Src;
9646 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9647 if (rc == VINF_SUCCESS)
9648 {
9649 pu256Dst->au64[0] = pu256Src->au64[0];
9650 pu256Dst->au64[1] = pu256Src->au64[1];
9651 pu256Dst->au64[2] = pu256Src->au64[2];
9652 pu256Dst->au64[3] = pu256Src->au64[3];
9653 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9654 }
9655 return rc;
9656}
9657
9658
9659#ifdef IEM_WITH_SETJMP
9660/**
9661 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9662 *
9663 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9664 * @param pu256Dst Where to return the oword.
9665 * @param iSegReg The index of the segment register to use for
9666 * this access. The base and limits are checked.
9667 * @param GCPtrMem The address of the guest memory.
9668 */
9669IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9670{
9671 /* The lazy approach for now... */
9672 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9673 pu256Dst->au64[0] = pu256Src->au64[0];
9674 pu256Dst->au64[1] = pu256Src->au64[1];
9675 pu256Dst->au64[2] = pu256Src->au64[2];
9676 pu256Dst->au64[3] = pu256Src->au64[3];
9677 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9678}
9679#endif
9680
9681
9682/**
9683 * Fetches a data oword (octo word) at an aligned address, generally AVX
9684 * related.
9685 *
9686 * Raises \#GP(0) if not aligned.
9687 *
9688 * @returns Strict VBox status code.
9689 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9690 * @param pu256Dst Where to return the oword.
9691 * @param iSegReg The index of the segment register to use for
9692 * this access. The base and limits are checked.
9693 * @param GCPtrMem The address of the guest memory.
9694 */
9695IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9696{
9697 /* The lazy approach for now... */
9698 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9699 if (GCPtrMem & 31)
9700 return iemRaiseGeneralProtectionFault0(pVCpu);
9701
9702 PCRTUINT256U pu256Src;
9703 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9704 if (rc == VINF_SUCCESS)
9705 {
9706 pu256Dst->au64[0] = pu256Src->au64[0];
9707 pu256Dst->au64[1] = pu256Src->au64[1];
9708 pu256Dst->au64[2] = pu256Src->au64[2];
9709 pu256Dst->au64[3] = pu256Src->au64[3];
9710 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9711 }
9712 return rc;
9713}
9714
9715
9716#ifdef IEM_WITH_SETJMP
9717/**
9718 * Fetches a data oword (octo word) at an aligned address, generally AVX
9719 * related, longjmp on error.
9720 *
9721 * Raises \#GP(0) if not aligned.
9722 *
9723 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9724 * @param pu256Dst Where to return the oword.
9725 * @param iSegReg The index of the segment register to use for
9726 * this access. The base and limits are checked.
9727 * @param GCPtrMem The address of the guest memory.
9728 */
9729DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9730{
9731 /* The lazy approach for now... */
9732 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9733 if ((GCPtrMem & 31) == 0)
9734 {
9735 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9736 pu256Dst->au64[0] = pu256Src->au64[0];
9737 pu256Dst->au64[1] = pu256Src->au64[1];
9738 pu256Dst->au64[2] = pu256Src->au64[2];
9739 pu256Dst->au64[3] = pu256Src->au64[3];
9740 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9741 return;
9742 }
9743
9744 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9745 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9746}
9747#endif
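
/*
 * Usage sketch (hypothetical caller, names are placeholders): a VMOVDQA-style
 * 256-bit load going through the aligned fetch above. Unlike the 128-bit SSE
 * variants, this helper has no MXCSR.MM escape; any address with
 * (GCPtrEffSrc & 31) set raises \#GP(0).
 *
 *      RTUINT256U uSrc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU256AlignedSse(pVCpu, &uSrc, iEffSeg, GCPtrEffSrc);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */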
9748
9749
9750
9751/**
9752 * Fetches a descriptor register (lgdt, lidt).
9753 *
9754 * @returns Strict VBox status code.
9755 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9756 * @param pcbLimit Where to return the limit.
9757 * @param pGCPtrBase Where to return the base.
9758 * @param iSegReg The index of the segment register to use for
9759 * this access. The base and limits are checked.
9760 * @param GCPtrMem The address of the guest memory.
9761 * @param enmOpSize The effective operand size.
9762 */
9763IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9764 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9765{
9766 /*
9767 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9768 * little special:
9769 * - The two reads are done separately.
9770 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9771 * - We suspect the 386 to actually commit the limit before the base in
9772 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9773 * don't try to emulate this eccentric behavior, because it's not well
9774 * enough understood and rather hard to trigger.
9775 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9776 */
9777 VBOXSTRICTRC rcStrict;
9778 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9779 {
9780 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9781 if (rcStrict == VINF_SUCCESS)
9782 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9783 }
9784 else
9785 {
9786 uint32_t uTmp = 0; /* (Silences a Visual C++ 'maybe used uninitialized' warning.) */
9787 if (enmOpSize == IEMMODE_32BIT)
9788 {
9789 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9790 {
9791 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9792 if (rcStrict == VINF_SUCCESS)
9793 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9794 }
9795 else
9796 {
9797 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9798 if (rcStrict == VINF_SUCCESS)
9799 {
9800 *pcbLimit = (uint16_t)uTmp;
9801 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9802 }
9803 }
9804 if (rcStrict == VINF_SUCCESS)
9805 *pGCPtrBase = uTmp;
9806 }
9807 else
9808 {
9809 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9810 if (rcStrict == VINF_SUCCESS)
9811 {
9812 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9813 if (rcStrict == VINF_SUCCESS)
9814 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9815 }
9816 }
9817 }
9818 return rcStrict;
9819}
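
/*
 * Usage sketch (hypothetical LGDT/LIDT worker, names are placeholders): the
 * helper above hides the three layouts: 16-bit operand size yields a 16-bit
 * limit and a 24-bit base (the top byte of the dword read at +2 is masked
 * off), 32-bit operand size yields a 16-bit limit and a full dword base, and
 * 64-bit mode yields a 16-bit limit and a qword base.
 *
 *      uint16_t cbLimit;
 *      RTGCPTR GCPtrBase;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */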
9820
9821
9822
9823/**
9824 * Stores a data byte.
9825 *
9826 * @returns Strict VBox status code.
9827 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9828 * @param iSegReg The index of the segment register to use for
9829 * this access. The base and limits are checked.
9830 * @param GCPtrMem The address of the guest memory.
9831 * @param u8Value The value to store.
9832 */
9833IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9834{
9835 /* The lazy approach for now... */
9836 uint8_t *pu8Dst;
9837 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9838 if (rc == VINF_SUCCESS)
9839 {
9840 *pu8Dst = u8Value;
9841 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9842 }
9843 return rc;
9844}
9845
9846
9847#ifdef IEM_WITH_SETJMP
9848/**
9849 * Stores a data byte, longjmp on error.
9850 *
9851 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9852 * @param iSegReg The index of the segment register to use for
9853 * this access. The base and limits are checked.
9854 * @param GCPtrMem The address of the guest memory.
9855 * @param u8Value The value to store.
9856 */
9857IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9858{
9859 /* The lazy approach for now... */
9860 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9861 *pu8Dst = u8Value;
9862 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9863}
9864#endif
9865
9866
9867/**
9868 * Stores a data word.
9869 *
9870 * @returns Strict VBox status code.
9871 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9872 * @param iSegReg The index of the segment register to use for
9873 * this access. The base and limits are checked.
9874 * @param GCPtrMem The address of the guest memory.
9875 * @param u16Value The value to store.
9876 */
9877IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9878{
9879 /* The lazy approach for now... */
9880 uint16_t *pu16Dst;
9881 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9882 if (rc == VINF_SUCCESS)
9883 {
9884 *pu16Dst = u16Value;
9885 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9886 }
9887 return rc;
9888}
9889
9890
9891#ifdef IEM_WITH_SETJMP
9892/**
9893 * Stores a data word, longjmp on error.
9894 *
9895 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9896 * @param iSegReg The index of the segment register to use for
9897 * this access. The base and limits are checked.
9898 * @param GCPtrMem The address of the guest memory.
9899 * @param u16Value The value to store.
9900 */
9901IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9902{
9903 /* The lazy approach for now... */
9904 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9905 *pu16Dst = u16Value;
9906 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9907}
9908#endif
9909
9910
9911/**
9912 * Stores a data dword.
9913 *
9914 * @returns Strict VBox status code.
9915 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9916 * @param iSegReg The index of the segment register to use for
9917 * this access. The base and limits are checked.
9918 * @param GCPtrMem The address of the guest memory.
9919 * @param u32Value The value to store.
9920 */
9921IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9922{
9923 /* The lazy approach for now... */
9924 uint32_t *pu32Dst;
9925 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9926 if (rc == VINF_SUCCESS)
9927 {
9928 *pu32Dst = u32Value;
9929 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9930 }
9931 return rc;
9932}
9933
9934
9935#ifdef IEM_WITH_SETJMP
9936/**
9937 * Stores a data dword, longjmp on error.
9938 *
9939 * @returns Strict VBox status code.
9940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9941 * @param iSegReg The index of the segment register to use for
9942 * this access. The base and limits are checked.
9943 * @param GCPtrMem The address of the guest memory.
9944 * @param u32Value The value to store.
9945 */
9946IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9947{
9948 /* The lazy approach for now... */
9949 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9950 *pu32Dst = u32Value;
9951 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9952}
9953#endif
9954
9955
9956/**
9957 * Stores a data qword.
9958 *
9959 * @returns Strict VBox status code.
9960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9961 * @param iSegReg The index of the segment register to use for
9962 * this access. The base and limits are checked.
9963 * @param GCPtrMem The address of the guest memory.
9964 * @param u64Value The value to store.
9965 */
9966IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9967{
9968 /* The lazy approach for now... */
9969 uint64_t *pu64Dst;
9970 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9971 if (rc == VINF_SUCCESS)
9972 {
9973 *pu64Dst = u64Value;
9974 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9975 }
9976 return rc;
9977}
9978
9979
9980#ifdef IEM_WITH_SETJMP
9981/**
9982 * Stores a data qword, longjmp on error.
9983 *
9984 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9985 * @param iSegReg The index of the segment register to use for
9986 * this access. The base and limits are checked.
9987 * @param GCPtrMem The address of the guest memory.
9988 * @param u64Value The value to store.
9989 */
9990IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9991{
9992 /* The lazy approach for now... */
9993 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9994 *pu64Dst = u64Value;
9995 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9996}
9997#endif
9998
9999
10000/**
10001 * Stores a data dqword.
10002 *
10003 * @returns Strict VBox status code.
10004 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10005 * @param iSegReg The index of the segment register to use for
10006 * this access. The base and limits are checked.
10007 * @param GCPtrMem The address of the guest memory.
10008 * @param u128Value The value to store.
10009 */
10010IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10011{
10012 /* The lazy approach for now... */
10013 PRTUINT128U pu128Dst;
10014 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10015 if (rc == VINF_SUCCESS)
10016 {
10017 pu128Dst->au64[0] = u128Value.au64[0];
10018 pu128Dst->au64[1] = u128Value.au64[1];
10019 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10020 }
10021 return rc;
10022}
10023
10024
10025#ifdef IEM_WITH_SETJMP
10026/**
10027 * Stores a data dqword, longjmp on error.
10028 *
10029 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10030 * @param iSegReg The index of the segment register to use for
10031 * this access. The base and limits are checked.
10032 * @param GCPtrMem The address of the guest memory.
10033 * @param u128Value The value to store.
10034 */
10035IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10036{
10037 /* The lazy approach for now... */
10038 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10039 pu128Dst->au64[0] = u128Value.au64[0];
10040 pu128Dst->au64[1] = u128Value.au64[1];
10041 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10042}
10043#endif
10044
10045
10046/**
10047 * Stores a data dqword, SSE aligned.
10048 *
10049 * @returns Strict VBox status code.
10050 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10051 * @param iSegReg The index of the segment register to use for
10052 * this access. The base and limits are checked.
10053 * @param GCPtrMem The address of the guest memory.
10054 * @param u128Value The value to store.
10055 */
10056IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10057{
10058 /* The lazy approach for now... */
10059 if ( (GCPtrMem & 15)
10060 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10061 return iemRaiseGeneralProtectionFault0(pVCpu);
10062
10063 PRTUINT128U pu128Dst;
10064 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10065 if (rc == VINF_SUCCESS)
10066 {
10067 pu128Dst->au64[0] = u128Value.au64[0];
10068 pu128Dst->au64[1] = u128Value.au64[1];
10069 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10070 }
10071 return rc;
10072}
10073
10074
10075#ifdef IEM_WITH_SETJMP
10076/**
10077 * Stores a data dqword, SSE aligned, longjmp on error.
10078 *
10079 * @returns Strict VBox status code.
10080 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10081 * @param iSegReg The index of the segment register to use for
10082 * this access. The base and limits are checked.
10083 * @param GCPtrMem The address of the guest memory.
10084 * @param u128Value The value to store.
10085 */
10086DECL_NO_INLINE(IEM_STATIC, void)
10087iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10088{
10089 /* The lazy approach for now... */
10090 if ( (GCPtrMem & 15) == 0
10091 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10092 {
10093 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10094 pu128Dst->au64[0] = u128Value.au64[0];
10095 pu128Dst->au64[1] = u128Value.au64[1];
10096 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10097 return;
10098 }
10099
10100 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10101 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10102}
10103#endif
10104
10105
10106/**
10107 * Stores a data oword (octo word), generally AVX related.
10108 *
10109 * @returns Strict VBox status code.
10110 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10111 * @param iSegReg The index of the segment register to use for
10112 * this access. The base and limits are checked.
10113 * @param GCPtrMem The address of the guest memory.
10114 * @param pu256Value Pointer to the value to store.
10115 */
10116IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10117{
10118 /* The lazy approach for now... */
10119 PRTUINT256U pu256Dst;
10120 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10121 if (rc == VINF_SUCCESS)
10122 {
10123 pu256Dst->au64[0] = pu256Value->au64[0];
10124 pu256Dst->au64[1] = pu256Value->au64[1];
10125 pu256Dst->au64[2] = pu256Value->au64[2];
10126 pu256Dst->au64[3] = pu256Value->au64[3];
10127 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10128 }
10129 return rc;
10130}
10131
10132
10133#ifdef IEM_WITH_SETJMP
10134/**
10135 * Stores a data oword (octo word), longjmp on error.
10136 *
10137 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10138 * @param iSegReg The index of the segment register to use for
10139 * this access. The base and limits are checked.
10140 * @param GCPtrMem The address of the guest memory.
10141 * @param pu256Value Pointer to the value to store.
10142 */
10143IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10144{
10145 /* The lazy approach for now... */
10146 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10147 pu256Dst->au64[0] = pu256Value->au64[0];
10148 pu256Dst->au64[1] = pu256Value->au64[1];
10149 pu256Dst->au64[2] = pu256Value->au64[2];
10150 pu256Dst->au64[3] = pu256Value->au64[3];
10151 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10152}
10153#endif
10154
10155
10156/**
10157 * Stores a data oword (octo word), AVX aligned.
10158 *
10159 * @returns Strict VBox status code.
10160 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10161 * @param iSegReg The index of the segment register to use for
10162 * this access. The base and limits are checked.
10163 * @param GCPtrMem The address of the guest memory.
10164 * @param pu256Value Pointer to the value to store.
10165 */
10166IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10167{
10168 /* The lazy approach for now... */
10169 if (GCPtrMem & 31)
10170 return iemRaiseGeneralProtectionFault0(pVCpu);
10171
10172 PRTUINT256U pu256Dst;
10173 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10174 if (rc == VINF_SUCCESS)
10175 {
10176 pu256Dst->au64[0] = pu256Value->au64[0];
10177 pu256Dst->au64[1] = pu256Value->au64[1];
10178 pu256Dst->au64[2] = pu256Value->au64[2];
10179 pu256Dst->au64[3] = pu256Value->au64[3];
10180 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10181 }
10182 return rc;
10183}
10184
10185
10186#ifdef IEM_WITH_SETJMP
10187/**
10188 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10189 *
10190 * @returns Strict VBox status code.
10191 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10192 * @param iSegReg The index of the segment register to use for
10193 * this access. The base and limits are checked.
10194 * @param GCPtrMem The address of the guest memory.
10195 * @param pu256Value Pointer to the value to store.
10196 */
10197DECL_NO_INLINE(IEM_STATIC, void)
10198iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10199{
10200 /* The lazy approach for now... */
10201 if ((GCPtrMem & 31) == 0)
10202 {
10203 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10204 pu256Dst->au64[0] = pu256Value->au64[0];
10205 pu256Dst->au64[1] = pu256Value->au64[1];
10206 pu256Dst->au64[2] = pu256Value->au64[2];
10207 pu256Dst->au64[3] = pu256Value->au64[3];
10208 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10209 return;
10210 }
10211
10212 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10213 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10214}
10215#endif
10216
10217
10218/**
10219 * Stores a descriptor register (sgdt, sidt).
10220 *
10221 * @returns Strict VBox status code.
10222 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10223 * @param cbLimit The limit.
10224 * @param GCPtrBase The base address.
10225 * @param iSegReg The index of the segment register to use for
10226 * this access. The base and limits are checked.
10227 * @param GCPtrMem The address of the guest memory.
10228 */
10229IEM_STATIC VBOXSTRICTRC
10230iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10231{
10232 VBOXSTRICTRC rcStrict;
10233 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_READS))
10234 {
10235 Log(("sidt/sgdt: Guest intercept -> #VMEXIT\n"));
10236 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
10237 }
10238
10239 /*
10240 * The SIDT and SGDT instructions actually store the data using two
10241 * independent writes. The instructions do not respond to operand-size prefixes.
10242 */
10243 rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10244 if (rcStrict == VINF_SUCCESS)
10245 {
10246 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10247 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10248 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10249 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10250 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10251 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10252 else
10253 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10254 }
10255 return rcStrict;
10256}
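
/*
 * Worked example of the 16-bit code path above: SGDT/SIDT executed in 16-bit
 * code writes the word limit at GCPtrMem and a dword at GCPtrMem + 2. On a
 * 286-level target CPU the top byte of that dword is forced to 0xff, on
 * later CPUs the plain 32-bit base is written:
 *
 *      base = 0x00123456 -> dword stored is 0xff123456 (IEMTARGETCPU_286 or older)
 *      base = 0x00123456 -> dword stored is 0x00123456 (386 and later)
 */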
10257
10258
10259/**
10260 * Pushes a word onto the stack.
10261 *
10262 * @returns Strict VBox status code.
10263 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10264 * @param u16Value The value to push.
10265 */
10266IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10267{
10268 /* Decrement the stack pointer. */
10269 uint64_t uNewRsp;
10270 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10271 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
10272
10273 /* Write the word the lazy way. */
10274 uint16_t *pu16Dst;
10275 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10276 if (rc == VINF_SUCCESS)
10277 {
10278 *pu16Dst = u16Value;
10279 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10280 }
10281
10282 /* Commit the new RSP value unless an access handler made trouble. */
10283 if (rc == VINF_SUCCESS)
10284 pCtx->rsp = uNewRsp;
10285
10286 return rc;
10287}
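
/*
 * Usage sketch (hypothetical caller): pushing a 16-bit value, e.g. an error
 * code, during exception delivery. The helper only commits the new RSP when
 * the write went through (VINF_SUCCESS), so a failed push (for instance a
 * \#PF on the stack access) leaves RSP unchanged. uErrorCode is a placeholder
 * for whatever value the caller pushes.
 *
 *      VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uErrorCode);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */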
10288
10289
10290/**
10291 * Pushes a dword onto the stack.
10292 *
10293 * @returns Strict VBox status code.
10294 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10295 * @param u32Value The value to push.
10296 */
10297IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10298{
10299 /* Decrement the stack pointer. */
10300 uint64_t uNewRsp;
10301 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10302 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10303
10304 /* Write the dword the lazy way. */
10305 uint32_t *pu32Dst;
10306 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10307 if (rc == VINF_SUCCESS)
10308 {
10309 *pu32Dst = u32Value;
10310 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10311 }
10312
10313 /* Commit the new RSP value unless an access handler made trouble. */
10314 if (rc == VINF_SUCCESS)
10315 pCtx->rsp = uNewRsp;
10316
10317 return rc;
10318}
10319
10320
10321/**
10322 * Pushes a dword segment register value onto the stack.
10323 *
10324 * @returns Strict VBox status code.
10325 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10326 * @param u32Value The value to push.
10327 */
10328IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10329{
10330 /* Decrement the stack pointer. */
10331 uint64_t uNewRsp;
10332 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10333 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10334
10335 VBOXSTRICTRC rc;
10336 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
10337 {
10338 /* The recompiler writes a full dword. */
10339 uint32_t *pu32Dst;
10340 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10341 if (rc == VINF_SUCCESS)
10342 {
10343 *pu32Dst = u32Value;
10344 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10345 }
10346 }
10347 else
10348 {
10349 /* The Intel docs talk about zero extending the selector register
10350 value. The actual Intel CPU tested here might be zero extending the
10351 value, but it still only writes the lower word... */
10352 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10353 * happens when crossing a page boundary: is the high word checked
10354 * for write accessibility or not? Probably it is. What about segment limits?
10355 * It appears this behavior is also shared with trap error codes.
10356 *
10357 * Docs indicate the behavior may have changed with the Pentium or Pentium Pro. Check
10358 * ancient hardware to see when it actually changed. */
10359 uint16_t *pu16Dst;
10360 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10361 if (rc == VINF_SUCCESS)
10362 {
10363 *pu16Dst = (uint16_t)u32Value;
10364 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10365 }
10366 }
10367
10368 /* Commit the new RSP value unless an access handler made trouble. */
10369 if (rc == VINF_SUCCESS)
10370 pCtx->rsp = uNewRsp;
10371
10372 return rc;
10373}
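
/*
 * Worked example of the non-verification path above: the stack slot is
 * mapped as a full dword, but only the low word is written, so the upper
 * word of the slot keeps whatever was there. If the dword at the new top of
 * stack reads 0xdeadbeef and a selector value of 0x0023 is pushed, the slot
 * reads 0xdead0023 afterwards, matching what the comment above reports for
 * real Intel hardware.
 */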
10374
10375
10376/**
10377 * Pushes a qword onto the stack.
10378 *
10379 * @returns Strict VBox status code.
10380 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10381 * @param u64Value The value to push.
10382 */
10383IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10384{
10385 /* Decrement the stack pointer. */
10386 uint64_t uNewRsp;
10387 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10388 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
10389
10390 /* Write the qword the lazy way. */
10391 uint64_t *pu64Dst;
10392 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10393 if (rc == VINF_SUCCESS)
10394 {
10395 *pu64Dst = u64Value;
10396 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10397 }
10398
10399 /* Commit the new RSP value unless an access handler made trouble. */
10400 if (rc == VINF_SUCCESS)
10401 pCtx->rsp = uNewRsp;
10402
10403 return rc;
10404}
10405
10406
10407/**
10408 * Pops a word from the stack.
10409 *
10410 * @returns Strict VBox status code.
10411 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10412 * @param pu16Value Where to store the popped value.
10413 */
10414IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10415{
10416 /* Increment the stack pointer. */
10417 uint64_t uNewRsp;
10418 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10419 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
10420
10421 /* Read the word the lazy way. */
10422 uint16_t const *pu16Src;
10423 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10424 if (rc == VINF_SUCCESS)
10425 {
10426 *pu16Value = *pu16Src;
10427 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10428
10429 /* Commit the new RSP value. */
10430 if (rc == VINF_SUCCESS)
10431 pCtx->rsp = uNewRsp;
10432 }
10433
10434 return rc;
10435}
10436
10437
10438/**
10439 * Pops a dword from the stack.
10440 *
10441 * @returns Strict VBox status code.
10442 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10443 * @param pu32Value Where to store the popped value.
10444 */
10445IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10446{
10447 /* Increment the stack pointer. */
10448 uint64_t uNewRsp;
10449 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10450 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
10451
10452 /* Read the dword the lazy way. */
10453 uint32_t const *pu32Src;
10454 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10455 if (rc == VINF_SUCCESS)
10456 {
10457 *pu32Value = *pu32Src;
10458 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10459
10460 /* Commit the new RSP value. */
10461 if (rc == VINF_SUCCESS)
10462 pCtx->rsp = uNewRsp;
10463 }
10464
10465 return rc;
10466}
10467
10468
10469/**
10470 * Pops a qword from the stack.
10471 *
10472 * @returns Strict VBox status code.
10473 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10474 * @param pu64Value Where to store the popped value.
10475 */
10476IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10477{
10478 /* Increment the stack pointer. */
10479 uint64_t uNewRsp;
10480 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10481 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
10482
10483 /* Read the qword the lazy way. */
10484 uint64_t const *pu64Src;
10485 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10486 if (rc == VINF_SUCCESS)
10487 {
10488 *pu64Value = *pu64Src;
10489 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10490
10491 /* Commit the new RSP value. */
10492 if (rc == VINF_SUCCESS)
10493 pCtx->rsp = uNewRsp;
10494 }
10495
10496 return rc;
10497}
10498
10499
10500/**
10501 * Pushes a word onto the stack, using a temporary stack pointer.
10502 *
10503 * @returns Strict VBox status code.
10504 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10505 * @param u16Value The value to push.
10506 * @param pTmpRsp Pointer to the temporary stack pointer.
10507 */
10508IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10509{
10510 /* Decrement the stack pointer. */
10511 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10512 RTUINT64U NewRsp = *pTmpRsp;
10513 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
10514
10515 /* Write the word the lazy way. */
10516 uint16_t *pu16Dst;
10517 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10518 if (rc == VINF_SUCCESS)
10519 {
10520 *pu16Dst = u16Value;
10521 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10522 }
10523
10524 /* Commit the new RSP value unless an access handler made trouble. */
10525 if (rc == VINF_SUCCESS)
10526 *pTmpRsp = NewRsp;
10527
10528 return rc;
10529}
10530
10531
10532/**
10533 * Pushes a dword onto the stack, using a temporary stack pointer.
10534 *
10535 * @returns Strict VBox status code.
10536 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10537 * @param u32Value The value to push.
10538 * @param pTmpRsp Pointer to the temporary stack pointer.
10539 */
10540IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10541{
10542 /* Decrement the stack pointer. */
10543 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10544 RTUINT64U NewRsp = *pTmpRsp;
10545 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
10546
10547 /* Write the dword the lazy way. */
10548 uint32_t *pu32Dst;
10549 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10550 if (rc == VINF_SUCCESS)
10551 {
10552 *pu32Dst = u32Value;
10553 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10554 }
10555
10556 /* Commit the new RSP value unless an access handler made trouble. */
10557 if (rc == VINF_SUCCESS)
10558 *pTmpRsp = NewRsp;
10559
10560 return rc;
10561}
10562
10563
10564/**
10565 * Pushes a qword onto the stack, using a temporary stack pointer.
10566 *
10567 * @returns Strict VBox status code.
10568 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10569 * @param u64Value The value to push.
10570 * @param pTmpRsp Pointer to the temporary stack pointer.
10571 */
10572IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10573{
10574 /* Decrement the stack pointer. */
10575 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10576 RTUINT64U NewRsp = *pTmpRsp;
10577 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
10578
10579 /* Write the qword the lazy way. */
10580 uint64_t *pu64Dst;
10581 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10582 if (rc == VINF_SUCCESS)
10583 {
10584 *pu64Dst = u64Value;
10585 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10586 }
10587
10588 /* Commit the new RSP value unless an access handler made trouble. */
10589 if (rc == VINF_SUCCESS)
10590 *pTmpRsp = NewRsp;
10591
10592 return rc;
10593}
10594
10595
10596/**
10597 * Pops a word from the stack, using a temporary stack pointer.
10598 *
10599 * @returns Strict VBox status code.
10600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10601 * @param pu16Value Where to store the popped value.
10602 * @param pTmpRsp Pointer to the temporary stack pointer.
10603 */
10604IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10605{
10606 /* Increment the stack pointer. */
10607 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10608 RTUINT64U NewRsp = *pTmpRsp;
10609 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
10610
10611 /* Read the word the lazy way. */
10612 uint16_t const *pu16Src;
10613 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10614 if (rc == VINF_SUCCESS)
10615 {
10616 *pu16Value = *pu16Src;
10617 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10618
10619 /* Commit the new RSP value. */
10620 if (rc == VINF_SUCCESS)
10621 *pTmpRsp = NewRsp;
10622 }
10623
10624 return rc;
10625}
10626
10627
10628/**
10629 * Pops a dword from the stack, using a temporary stack pointer.
10630 *
10631 * @returns Strict VBox status code.
10632 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10633 * @param pu32Value Where to store the popped value.
10634 * @param pTmpRsp Pointer to the temporary stack pointer.
10635 */
10636IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10637{
10638 /* Increment the stack pointer. */
10639 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10640 RTUINT64U NewRsp = *pTmpRsp;
10641 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
10642
10643 /* Read the dword the lazy way. */
10644 uint32_t const *pu32Src;
10645 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10646 if (rc == VINF_SUCCESS)
10647 {
10648 *pu32Value = *pu32Src;
10649 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10650
10651 /* Commit the new RSP value. */
10652 if (rc == VINF_SUCCESS)
10653 *pTmpRsp = NewRsp;
10654 }
10655
10656 return rc;
10657}
10658
10659
10660/**
10661 * Pops a qword from the stack, using a temporary stack pointer.
10662 *
10663 * @returns Strict VBox status code.
10664 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10665 * @param pu64Value Where to store the popped value.
10666 * @param pTmpRsp Pointer to the temporary stack pointer.
10667 */
10668IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10669{
10670 /* Increment the stack pointer. */
10671 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10672 RTUINT64U NewRsp = *pTmpRsp;
10673 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10674
10675 /* Read the qword the lazy way. */
10676 uint64_t const *pu64Src;
10677 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10678 if (rcStrict == VINF_SUCCESS)
10679 {
10680 *pu64Value = *pu64Src;
10681 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10682
10683 /* Commit the new RSP value. */
10684 if (rcStrict == VINF_SUCCESS)
10685 *pTmpRsp = NewRsp;
10686 }
10687
10688 return rcStrict;
10689}
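
/*
 * Usage sketch (hypothetical IRET-style worker, names are placeholders): the
 * Ex variants work against a caller-provided RTUINT64U instead of
 * CPUMCTX::rsp, so a multi-value pop sequence can be abandoned halfway
 * without having modified the real stack pointer.
 *
 *      RTUINT64U TmpRsp;
 *      TmpRsp.u = IEM_GET_CTX(pVCpu)->rsp;
 *      uint64_t uNewRip, uNewCs, uNewFlags;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopU64Ex(pVCpu, &uNewRip, &TmpRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *          rcStrict = iemMemStackPopU64Ex(pVCpu, &uNewCs, &TmpRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *          rcStrict = iemMemStackPopU64Ex(pVCpu, &uNewFlags, &TmpRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *          IEM_GET_CTX(pVCpu)->rsp = TmpRsp.u;
 */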
10690
10691
10692/**
10693 * Begin a special stack push (used by interrupts, exceptions and such).
10694 *
10695 * This will raise \#SS or \#PF if appropriate.
10696 *
10697 * @returns Strict VBox status code.
10698 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10699 * @param cbMem The number of bytes to push onto the stack.
10700 * @param ppvMem Where to return the pointer to the stack memory.
10701 * As with the other memory functions this could be
10702 * direct access or bounce buffered access, so
10703 * don't commit any registers until the commit call
10704 * succeeds.
10705 * @param puNewRsp Where to return the new RSP value. This must be
10706 * passed unchanged to
10707 * iemMemStackPushCommitSpecial().
10708 */
10709IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10710{
10711 Assert(cbMem < UINT8_MAX);
10712 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10713 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10714 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10715}
10716
10717
10718/**
10719 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10720 *
10721 * This will update the rSP.
10722 *
10723 * @returns Strict VBox status code.
10724 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10725 * @param pvMem The pointer returned by
10726 * iemMemStackPushBeginSpecial().
10727 * @param uNewRsp The new RSP value returned by
10728 * iemMemStackPushBeginSpecial().
10729 */
10730IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10731{
10732 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10733 if (rcStrict == VINF_SUCCESS)
10734 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10735 return rcStrict;
10736}
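
/*
 * Usage sketch (hypothetical exception dispatch code, the frame layout is
 * made up for illustration): map the whole stack frame with the Begin call,
 * fill it through the returned pointer, then let the Commit call unmap the
 * memory and update RSP in one go.
 *
 *      uint32_t *pau32Frame;
 *      uint64_t uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, (void **)&pau32Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      pau32Frame[0] = uOldEip;
 *      pau32Frame[1] = uOldCs;
 *      rcStrict = iemMemStackPushCommitSpecial(pVCpu, pau32Frame, uNewRsp);
 */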
10737
10738
10739/**
10740 * Begin a special stack pop (used by iret, retf and such).
10741 *
10742 * This will raise \#SS or \#PF if appropriate.
10743 *
10744 * @returns Strict VBox status code.
10745 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10746 * @param cbMem The number of bytes to pop from the stack.
10747 * @param ppvMem Where to return the pointer to the stack memory.
10748 * @param puNewRsp Where to return the new RSP value. This must be
10749 * assigned to CPUMCTX::rsp manually some time
10750 * after iemMemStackPopDoneSpecial() has been
10751 * called.
10752 */
10753IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10754{
10755 Assert(cbMem < UINT8_MAX);
10756 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10757 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10758 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10759}
10760
10761
10762/**
10763 * Continue a special stack pop (used by iret and retf).
10764 *
10765 * This will raise \#SS or \#PF if appropriate.
10766 *
10767 * @returns Strict VBox status code.
10768 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10769 * @param cbMem The number of bytes to pop from the stack.
10770 * @param ppvMem Where to return the pointer to the stack memory.
10771 * @param puNewRsp Where to return the new RSP value. This must be
10772 * assigned to CPUMCTX::rsp manually some time
10773 * after iemMemStackPopDoneSpecial() has been
10774 * called.
10775 */
10776IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10777{
10778 Assert(cbMem < UINT8_MAX);
10779 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10780 RTUINT64U NewRsp;
10781 NewRsp.u = *puNewRsp;
10782 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10783 *puNewRsp = NewRsp.u;
10784 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10785}
10786
10787
10788/**
10789 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10790 * iemMemStackPopContinueSpecial).
10791 *
10792 * The caller will manually commit the rSP.
10793 *
10794 * @returns Strict VBox status code.
10795 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10796 * @param pvMem The pointer returned by
10797 * iemMemStackPopBeginSpecial() or
10798 * iemMemStackPopContinueSpecial().
10799 */
10800IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10801{
10802 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10803}
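
/*
 * Usage sketch (hypothetical RETF-style worker with a 32-bit operand size,
 * names are placeholders): the Begin/Done pair leaves the RSP commit to the
 * caller, which only assigns the returned value to CPUMCTX::rsp once
 * everything that can still fault has been done.
 *
 *      uint32_t const *pau32Frame;
 *      uint64_t uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, (void const **)&pau32Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      uint32_t const uNewEip = pau32Frame[0];
 *      uint16_t const uNewCs = (uint16_t)pau32Frame[1];
 *      rcStrict = iemMemStackPopDoneSpecial(pVCpu, pau32Frame);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      (load the new CS and EIP, then commit: IEM_GET_CTX(pVCpu)->rsp = uNewRsp;)
 */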
10804
10805
10806/**
10807 * Fetches a system table byte.
10808 *
10809 * @returns Strict VBox status code.
10810 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10811 * @param pbDst Where to return the byte.
10812 * @param iSegReg The index of the segment register to use for
10813 * this access. The base and limits are checked.
10814 * @param GCPtrMem The address of the guest memory.
10815 */
10816IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10817{
10818 /* The lazy approach for now... */
10819 uint8_t const *pbSrc;
10820 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10821 if (rc == VINF_SUCCESS)
10822 {
10823 *pbDst = *pbSrc;
10824 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10825 }
10826 return rc;
10827}
10828
10829
10830/**
10831 * Fetches a system table word.
10832 *
10833 * @returns Strict VBox status code.
10834 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10835 * @param pu16Dst Where to return the word.
10836 * @param iSegReg The index of the segment register to use for
10837 * this access. The base and limits are checked.
10838 * @param GCPtrMem The address of the guest memory.
10839 */
10840IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10841{
10842 /* The lazy approach for now... */
10843 uint16_t const *pu16Src;
10844 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10845 if (rc == VINF_SUCCESS)
10846 {
10847 *pu16Dst = *pu16Src;
10848 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10849 }
10850 return rc;
10851}
10852
10853
10854/**
10855 * Fetches a system table dword.
10856 *
10857 * @returns Strict VBox status code.
10858 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10859 * @param pu32Dst Where to return the dword.
10860 * @param iSegReg The index of the segment register to use for
10861 * this access. The base and limits are checked.
10862 * @param GCPtrMem The address of the guest memory.
10863 */
10864IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10865{
10866 /* The lazy approach for now... */
10867 uint32_t const *pu32Src;
10868 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10869 if (rc == VINF_SUCCESS)
10870 {
10871 *pu32Dst = *pu32Src;
10872 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10873 }
10874 return rc;
10875}
10876
10877
10878/**
10879 * Fetches a system table qword.
10880 *
10881 * @returns Strict VBox status code.
10882 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10883 * @param pu64Dst Where to return the qword.
10884 * @param iSegReg The index of the segment register to use for
10885 * this access. The base and limits are checked.
10886 * @param GCPtrMem The address of the guest memory.
10887 */
10888IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10889{
10890 /* The lazy approach for now... */
10891 uint64_t const *pu64Src;
10892 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10893 if (rc == VINF_SUCCESS)
10894 {
10895 *pu64Dst = *pu64Src;
10896 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10897 }
10898 return rc;
10899}
10900
10901
10902/**
10903 * Fetches a descriptor table entry with caller specified error code.
10904 *
10905 * @returns Strict VBox status code.
10906 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10907 * @param pDesc Where to return the descriptor table entry.
10908 * @param uSel The selector which table entry to fetch.
10909 * @param uXcpt The exception to raise on table lookup error.
10910 * @param uErrorCode The error code associated with the exception.
10911 */
10912IEM_STATIC VBOXSTRICTRC
10913iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10914{
10915 AssertPtr(pDesc);
10916 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10917
10918 /** @todo did the 286 require all 8 bytes to be accessible? */
10919 /*
10920 * Get the selector table base and check bounds.
10921 */
10922 RTGCPTR GCPtrBase;
10923 if (uSel & X86_SEL_LDT)
10924 {
10925 if ( !pCtx->ldtr.Attr.n.u1Present
10926 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10927 {
10928 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10929 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10930 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10931 uErrorCode, 0);
10932 }
10933
10934 Assert(pCtx->ldtr.Attr.n.u1Present);
10935 GCPtrBase = pCtx->ldtr.u64Base;
10936 }
10937 else
10938 {
10939 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10940 {
10941 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
10942 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10943 uErrorCode, 0);
10944 }
10945 GCPtrBase = pCtx->gdtr.pGdt;
10946 }
10947
10948 /*
10949 * Read the legacy descriptor and, if required, the long mode
10950 * extensions.
10951 */
10952 VBOXSTRICTRC rcStrict;
10953 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10954 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10955 else
10956 {
10957 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10958 if (rcStrict == VINF_SUCCESS)
10959 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10960 if (rcStrict == VINF_SUCCESS)
10961 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10962 if (rcStrict == VINF_SUCCESS)
10963 pDesc->Legacy.au16[3] = 0;
10964 else
10965 return rcStrict;
10966 }
10967
10968 if (rcStrict == VINF_SUCCESS)
10969 {
10970 if ( !IEM_IS_LONG_MODE(pVCpu)
10971 || pDesc->Legacy.Gen.u1DescType)
10972 pDesc->Long.au64[1] = 0;
10973 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
10974 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10975 else
10976 {
10977 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10978 /** @todo is this the right exception? */
10979 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10980 }
10981 }
10982 return rcStrict;
10983}
10984
10985
10986/**
10987 * Fetches a descriptor table entry.
10988 *
10989 * @returns Strict VBox status code.
10990 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10991 * @param pDesc Where to return the descriptor table entry.
10992 * @param uSel The selector which table entry to fetch.
10993 * @param uXcpt The exception to raise on table lookup error.
10994 */
10995IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10996{
10997 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10998}
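/*
 * Illustrative usage sketch (added note, not code from this file): a typical
 * caller fetches the descriptor with one of the helpers above and then does
 * its own type/DPL/present checks before marking it accessed.  The function
 * below and its checks are hypothetical and heavily simplified.
 */
#if 0 /* example only, not compiled */
static VBOXSTRICTRC iemExampleLoadDataSegDesc(PVMCPU pVCpu, uint16_t uSel)
{
    IEMSELDESC Desc;
    /* Raises the given exception itself if the selector is outside the GDT/LDT limit. */
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* Real callers also check the descriptor type and DPL here. */
    if (!Desc.Legacy.Gen.u1Present)
        return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
                                 uSel & X86_SEL_MASK_OFF_RPL, 0);

    return iemMemMarkSelDescAccessed(pVCpu, uSel);
}
#endif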
10999
11000
11001/**
11002 * Fakes a long mode stack selector descriptor for SS = 0.
11003 *
11004 * @param pDescSs Where to return the fake stack descriptor.
11005 * @param uDpl The DPL we want.
11006 */
11007IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11008{
11009 pDescSs->Long.au64[0] = 0;
11010 pDescSs->Long.au64[1] = 0;
11011 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11012 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11013 pDescSs->Long.Gen.u2Dpl = uDpl;
11014 pDescSs->Long.Gen.u1Present = 1;
11015 pDescSs->Long.Gen.u1Long = 1;
11016}
11017
11018
11019/**
11020 * Marks the selector descriptor as accessed (only non-system descriptors).
11021 *
11022 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11023 * will therefore skip the limit checks.
11024 *
11025 * @returns Strict VBox status code.
11026 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11027 * @param uSel The selector.
11028 */
11029IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11030{
11031 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11032
11033 /*
11034 * Get the selector table base and calculate the entry address.
11035 */
11036 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11037 ? pCtx->ldtr.u64Base
11038 : pCtx->gdtr.pGdt;
11039 GCPtr += uSel & X86_SEL_MASK;
11040
11041 /*
11042 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11043 * ugly stuff to avoid that. This also makes sure the access is atomic and
11044 * more or less removes any question about 8-bit vs 32-bit accesses.
11045 */
11046 VBOXSTRICTRC rcStrict;
11047 uint32_t volatile *pu32;
11048 if ((GCPtr & 3) == 0)
11049 {
11050 /* The normal case: map the dword containing the accessed bit (bit 40 of the descriptor). */
11051 GCPtr += 2 + 2;
11052 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11053 if (rcStrict != VINF_SUCCESS)
11054 return rcStrict;
11055 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1 (bit 0), but it is preceded by u8BaseHigh1 in the mapped dword. */
11056 }
11057 else
11058 {
11059 /* The misaligned GDT/LDT case, map the whole thing. */
11060 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11061 if (rcStrict != VINF_SUCCESS)
11062 return rcStrict;
11063 switch ((uintptr_t)pu32 & 3)
11064 {
11065 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11066 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11067 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11068 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11069 }
11070 }
11071
11072 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11073}
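/*
 * Added informational note: worked example of the bit arithmetic above.  The
 * accessed bit is bit 0 of the type byte at descriptor offset 5, i.e. bit 40
 * of the 8-byte descriptor.  In the aligned case the dword at offset 4 is
 * mapped, so the bit index becomes 40 - 32 = 8.  In the misaligned case the
 * whole descriptor is mapped and the switch advances the byte pointer to the
 * next dword boundary, subtracting 8 bits for each byte skipped (40 - 24,
 * 40 - 16, 40 - 8), so the same physical bit is always targeted.
 */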
11074
11075/** @} */
11076
11077
11078/*
11079 * Include the C/C++ implementations of the instructions.
11080 */
11081#include "IEMAllCImpl.cpp.h"
11082
11083
11084
11085/** @name "Microcode" macros.
11086 *
11087 * The idea is that we should be able to use the same code both to interpret
11088 * instructions and to feed a recompiler. Thus this obfuscation.
11089 *
11090 * @{
11091 */
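/*
 * Illustrative sketch (added example, not an opcode handler from this file):
 * how the IEM_MC_* macros defined below compose into a plain C statement
 * block.  The function name is made up and the instruction chosen (copying
 * EDX into EAX, roughly the register form of a MOV body) is hypothetical.
 */
#if 0 /* example only, not compiled */
FNIEMOP_DEF(iemOpExample_mov_eax_edx)
{
    IEM_MC_BEGIN(0, 1);                             /* no call args, one local */
    IEM_MC_LOCAL(uint32_t, u32Value);
    IEM_MC_FETCH_GREG_U32(u32Value, X86_GREG_xDX);
    IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Value);  /* also zeroes bits 63:32 */
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
#endif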
11092#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11093#define IEM_MC_END() }
11094#define IEM_MC_PAUSE() do {} while (0)
11095#define IEM_MC_CONTINUE() do {} while (0)
11096
11097/** Internal macro. */
11098#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11099 do \
11100 { \
11101 VBOXSTRICTRC rcStrict2 = a_Expr; \
11102 if (rcStrict2 != VINF_SUCCESS) \
11103 return rcStrict2; \
11104 } while (0)
11105
11106
11107#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11108#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11109#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11110#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11111#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11112#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11113#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11114#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11115#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11116 do { \
11117 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11118 return iemRaiseDeviceNotAvailable(pVCpu); \
11119 } while (0)
11120#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11121 do { \
11122 if (((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11123 return iemRaiseDeviceNotAvailable(pVCpu); \
11124 } while (0)
11125#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11126 do { \
11127 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11128 return iemRaiseMathFault(pVCpu); \
11129 } while (0)
11130#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11131 do { \
11132 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11133 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11134 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11135 return iemRaiseUndefinedOpcode(pVCpu); \
11136 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11137 return iemRaiseDeviceNotAvailable(pVCpu); \
11138 } while (0)
11139#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11140 do { \
11141 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11142 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11143 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11144 return iemRaiseUndefinedOpcode(pVCpu); \
11145 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11146 return iemRaiseDeviceNotAvailable(pVCpu); \
11147 } while (0)
11148#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11149 do { \
11150 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11151 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11152 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11153 return iemRaiseUndefinedOpcode(pVCpu); \
11154 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11155 return iemRaiseDeviceNotAvailable(pVCpu); \
11156 } while (0)
11157#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11158 do { \
11159 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11160 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11161 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11162 return iemRaiseUndefinedOpcode(pVCpu); \
11163 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11164 return iemRaiseDeviceNotAvailable(pVCpu); \
11165 } while (0)
11166#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11167 do { \
11168 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11169 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11170 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11171 return iemRaiseUndefinedOpcode(pVCpu); \
11172 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11173 return iemRaiseDeviceNotAvailable(pVCpu); \
11174 } while (0)
11175#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11176 do { \
11177 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11178 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11179 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11180 return iemRaiseUndefinedOpcode(pVCpu); \
11181 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11182 return iemRaiseDeviceNotAvailable(pVCpu); \
11183 } while (0)
11184#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11185 do { \
11186 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11187 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11188 return iemRaiseUndefinedOpcode(pVCpu); \
11189 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11190 return iemRaiseDeviceNotAvailable(pVCpu); \
11191 } while (0)
11192#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11193 do { \
11194 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11195 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11196 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11197 return iemRaiseUndefinedOpcode(pVCpu); \
11198 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11199 return iemRaiseDeviceNotAvailable(pVCpu); \
11200 } while (0)
11201#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11202 do { \
11203 if (pVCpu->iem.s.uCpl != 0) \
11204 return iemRaiseGeneralProtectionFault0(pVCpu); \
11205 } while (0)
11206#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11207 do { \
11208 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11209 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11210 } while (0)
11211
11212
11213#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11214#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11215#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11216#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11217#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11218#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11219#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11220 uint32_t a_Name; \
11221 uint32_t *a_pName = &a_Name
11222#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11223 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
11224
11225#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11226#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11227
11228#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11229#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11230#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11231#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11232#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11233#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11234#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11235#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11236#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11237#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11238#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11239#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11240#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11241#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11242#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11243#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11244#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11245#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11246#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11247#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11248#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11249#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11250#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11251#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11252#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11253#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11254#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11255#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11256#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11257/** @note Not for IOPL or IF testing or modification. */
11258#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11259#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11260#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
11261#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
11262
11263#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11264#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11265#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11266#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11267#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11268#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11269#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11270#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11271#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11272#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11273#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11274 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11275
11276#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11277#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11278/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11279 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11280#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11281#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11282/** @note Not for IOPL or IF testing or modification. */
11283#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11284
11285#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11286#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11287#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11288 do { \
11289 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11290 *pu32Reg += (a_u32Value); \
11291 pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
11292 } while (0)
11293#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11294
11295#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11296#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11297#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11298 do { \
11299 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11300 *pu32Reg -= (a_u32Value); \
11301 pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
11302 } while (0)
11303#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11304#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11305
11306#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11307#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11308#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11309#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11310#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11311#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11312#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11313
11314#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11315#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11316#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11317#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11318
11319#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11320#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11321#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11322
11323#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11324#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11325#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11326
11327#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11328#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11329#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11330
11331#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11332#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11333#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11334
11335#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11336
11337#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11338
11339#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11340#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11341#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11342 do { \
11343 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11344 *pu32Reg &= (a_u32Value); \
11345 pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
11346 } while (0)
11347#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11348
11349#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11350#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11351#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11352 do { \
11353 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11354 *pu32Reg |= (a_u32Value); \
11355 pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
11356 } while (0)
11357#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11358
11359
11360/** @note Not for IOPL or IF modification. */
11361#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
11362/** @note Not for IOPL or IF modification. */
11363#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
11364/** @note Not for IOPL or IF modification. */
11365#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
11366
11367#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11368
11369/** Switches the FPU state to MMX mode (FSW.TOS=0, all tags valid, i.e. abridged FTW=0xff) if necessary. */
11370#define IEM_MC_FPU_TO_MMX_MODE() do { \
11371 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11372 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0xff; \
11373 } while (0)
11374
11375/** Switches the FPU state out of MMX mode (all tags empty, i.e. abridged FTW=0). */
11376#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11377 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0; \
11378 } while (0)
11379
11380#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11381 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11382#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11383 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11384#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11385 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11386 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11387 } while (0)
11388#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11389 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11390 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11391 } while (0)
11392#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11393 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11394#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11395 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11396#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11397 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11398
11399#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11400 do { (a_u128Value).au64[0] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11401 (a_u128Value).au64[1] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11402 } while (0)
11403#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11404 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11405#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11406 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11407#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11408 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11409#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11410 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11411 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11412 } while (0)
11413#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11414 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11415#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11416 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11417 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11418 } while (0)
11419#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11420 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11421#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11422 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11423 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11424 } while (0)
11425#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11426 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11427#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11428 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11429#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11430 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11431#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11432 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11433#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11434 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11435 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11436 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11437 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11438 } while (0)
11439
11440#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11441 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11442 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11443 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11444 } while (0)
11445#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11446 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11447 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11448 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11449 } while (0)
11450#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11451 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11452 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11453 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11454 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11455 } while (0)
11456#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11457 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11458 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11459 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11460 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11461 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11462 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11463 } while (0)
11464
11465#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11466#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11467 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11468 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11469 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11470 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11471 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11472 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11473 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11474 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11475 } while (0)
11476#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11477 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11478 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11479 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11480 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11481 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11482 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11483 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11484 } while (0)
11485#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11486 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11487 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11488 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11489 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11490 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11491 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11492 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11493 } while (0)
11494#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11495 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11496 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11497 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11498 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11499 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11500 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11501 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11502 } while (0)
11503
11504#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11505 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11506#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11507 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11508#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11509 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11510#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11511 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11512 uintptr_t const iYRegTmp = (a_iYReg); \
11513 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11514 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11515 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11516 } while (0)
11517
11518#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11519 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11520 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11521 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11522 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11523 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11524 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11525 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11526 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11527 } while (0)
11528#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11529 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11530 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11531 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11532 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11533 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11534 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11535 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11536 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11537 } while (0)
11538#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11539 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11540 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11541 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11542 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11543 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11544 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11545 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11546 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11547 } while (0)
11548
11549#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11550 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11551 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11552 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11553 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11554 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11555 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11556 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11557 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11558 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11559 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11560 } while (0)
11561#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11562 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11563 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11564 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11565 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11566 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11567 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11568 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11569 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11570 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11571 } while (0)
11572#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11573 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11574 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11575 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11576 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11577 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11578 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11579 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11580 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11581 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11582 } while (0)
11583#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11584 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11585 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11586 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11587 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11588 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11589 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11590 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11591 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11592 } while (0)
11593
11594#ifndef IEM_WITH_SETJMP
11595# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11596 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11597# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11598 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11599# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11600 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11601#else
11602# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11603 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11604# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11605 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11606# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11607 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11608#endif
11609
11610#ifndef IEM_WITH_SETJMP
11611# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11612 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11613# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11614 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11615# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11616 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11617#else
11618# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11619 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11620# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11621 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11622# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11623 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11624#endif
11625
11626#ifndef IEM_WITH_SETJMP
11627# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11628 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11629# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11630 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11631# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11632 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11633#else
11634# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11635 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11636# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11637 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11638# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11639 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11640#endif
11641
11642#ifdef SOME_UNUSED_FUNCTION
11643# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11644 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11645#endif
11646
11647#ifndef IEM_WITH_SETJMP
11648# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11649 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11650# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11651 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11652# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11653 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11654# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11655 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11656#else
11657# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11658 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11659# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11660 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11661# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11662 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11663# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11664 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11665#endif
11666
11667#ifndef IEM_WITH_SETJMP
11668# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11669 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11670# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11671 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11672# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11673 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11674#else
11675# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11676 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11677# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11678 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11679# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11680 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11681#endif
11682
11683#ifndef IEM_WITH_SETJMP
11684# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11685 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11686# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11687 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11688#else
11689# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11690 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11691# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11692 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11693#endif
11694
11695#ifndef IEM_WITH_SETJMP
11696# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11697 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11698# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11699 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11700#else
11701# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11702 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11703# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11704 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11705#endif
11706
11707
11708
11709#ifndef IEM_WITH_SETJMP
11710# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11711 do { \
11712 uint8_t u8Tmp; \
11713 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11714 (a_u16Dst) = u8Tmp; \
11715 } while (0)
11716# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11717 do { \
11718 uint8_t u8Tmp; \
11719 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11720 (a_u32Dst) = u8Tmp; \
11721 } while (0)
11722# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11723 do { \
11724 uint8_t u8Tmp; \
11725 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11726 (a_u64Dst) = u8Tmp; \
11727 } while (0)
11728# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11729 do { \
11730 uint16_t u16Tmp; \
11731 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11732 (a_u32Dst) = u16Tmp; \
11733 } while (0)
11734# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11735 do { \
11736 uint16_t u16Tmp; \
11737 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11738 (a_u64Dst) = u16Tmp; \
11739 } while (0)
11740# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11741 do { \
11742 uint32_t u32Tmp; \
11743 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11744 (a_u64Dst) = u32Tmp; \
11745 } while (0)
11746#else /* IEM_WITH_SETJMP */
11747# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11748 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11749# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11750 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11751# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11752 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11753# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11754 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11755# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11756 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11757# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11758 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11759#endif /* IEM_WITH_SETJMP */
11760
11761#ifndef IEM_WITH_SETJMP
11762# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11763 do { \
11764 uint8_t u8Tmp; \
11765 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11766 (a_u16Dst) = (int8_t)u8Tmp; \
11767 } while (0)
11768# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11769 do { \
11770 uint8_t u8Tmp; \
11771 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11772 (a_u32Dst) = (int8_t)u8Tmp; \
11773 } while (0)
11774# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11775 do { \
11776 uint8_t u8Tmp; \
11777 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11778 (a_u64Dst) = (int8_t)u8Tmp; \
11779 } while (0)
11780# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11781 do { \
11782 uint16_t u16Tmp; \
11783 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11784 (a_u32Dst) = (int16_t)u16Tmp; \
11785 } while (0)
11786# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11787 do { \
11788 uint16_t u16Tmp; \
11789 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11790 (a_u64Dst) = (int16_t)u16Tmp; \
11791 } while (0)
11792# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11793 do { \
11794 uint32_t u32Tmp; \
11795 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11796 (a_u64Dst) = (int32_t)u32Tmp; \
11797 } while (0)
11798#else /* IEM_WITH_SETJMP */
11799# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11800 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11801# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11802 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11803# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11804 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11805# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11806 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11807# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11808 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11809# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11810 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11811#endif /* IEM_WITH_SETJMP */
11812
11813#ifndef IEM_WITH_SETJMP
11814# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11815 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11816# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11817 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11818# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11819 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11820# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11821 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11822#else
11823# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11824 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11825# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11826 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11827# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11828 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11829# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11830 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11831#endif
11832
11833#ifndef IEM_WITH_SETJMP
11834# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11835 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11836# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11837 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11838# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11839 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11840# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11841 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11842#else
11843# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11844 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11845# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11846 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11847# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11848 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11849# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11850 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11851#endif
11852
11853#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11854#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11855#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11856#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11857#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11858#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11859#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11860 do { \
11861 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11862 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11863 } while (0)
11864
11865#ifndef IEM_WITH_SETJMP
11866# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11867 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11868# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11869 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11870#else
11871# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11872 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11873# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11874 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11875#endif
11876
11877#ifndef IEM_WITH_SETJMP
11878# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11879 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11880# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11881 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11882#else
11883# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11884 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11885# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11886 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11887#endif
11888
11889
11890#define IEM_MC_PUSH_U16(a_u16Value) \
11891 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11892#define IEM_MC_PUSH_U32(a_u32Value) \
11893 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11894#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11895 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11896#define IEM_MC_PUSH_U64(a_u64Value) \
11897 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11898
11899#define IEM_MC_POP_U16(a_pu16Value) \
11900 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11901#define IEM_MC_POP_U32(a_pu32Value) \
11902 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11903#define IEM_MC_POP_U64(a_pu64Value) \
11904 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11905
11906/** Maps guest memory for direct or bounce buffered access.
11907 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11908 * @remarks May return.
11909 */
11910#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11911 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11912
11913/** Maps guest memory for direct or bounce buffered access.
11914 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11915 * @remarks May return.
11916 */
11917#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11918 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11919
11920/** Commits the memory and unmaps the guest memory.
11921 * @remarks May return.
11922 */
11923#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11924 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
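/*
 * Illustrative sketch (added example): the usual map / modify / commit pattern
 * inside an instruction body.  The fragment is hypothetical and assumes it
 * sits in an IEM_MC_BEGIN/IEM_MC_END block with a ModR/M byte in bRm.
 */
#if 0 /* example only, not compiled */
        IEM_MC_ARG(uint16_t *, pu16Dst, 0);
        IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
        /* ... hand pu16Dst to an operand worker here ... */
        IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
#endif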
11925
11926/** Commits the memory and unmaps the guest memory unless the FPU status word
11927 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
11928 * would prevent the FPU store from taking place.
11929 *
11930 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11931 * store, while \#P will not.
11932 *
11933 * @remarks May in theory return - for now.
11934 */
11935#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11936 do { \
11937 if ( !(a_u16FSW & X86_FSW_ES) \
11938 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11939 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11940 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11941 } while (0)
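/*
 * Worked example for the conditional commit above (numbers for illustration):
 * take FSW=0x8081 (B, ES and IE set) and FCW=0x037e (all exceptions masked
 * except #I).  ES is set and the pending IE bit is unmasked, so neither arm of
 * the condition holds and the commit is skipped - the destination stays
 * unwritten, as on real hardware.  With the power-on FCW of 0x037f (everything
 * masked) the same FSW would let the store go through.
 */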
11942
11943/** Calculate efficient address from R/M. */
11944#ifndef IEM_WITH_SETJMP
11945# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11946 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11947#else
11948# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11949 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11950#endif
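/*
 * Illustrative decoder fragment (not from the instruction tables; shapes only):
 * a ModR/M memory form typically resolves its operand address like this before
 * touching guest memory:
 *
 *     IEM_MC_BEGIN(0, 2);
 *     IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);    // no immediate follows
 *     IEM_MC_FETCH_MEM_U16(u16Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *     ...
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */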
11951
11952#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11953#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11954#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11955#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11956#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11957#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11958#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11959
11960/**
11961 * Defers the rest of the instruction emulation to a C implementation routine
11962 * and returns, only taking the standard parameters.
11963 *
11964 * @param a_pfnCImpl The pointer to the C routine.
11965 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11966 */
11967#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11968
11969/**
11970 * Defers the rest of instruction emulation to a C implementation routine and
11971 * returns, taking one argument in addition to the standard ones.
11972 *
11973 * @param a_pfnCImpl The pointer to the C routine.
11974 * @param a0 The argument.
11975 */
11976#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11977
11978/**
11979 * Defers the rest of the instruction emulation to a C implementation routine
11980 * and returns, taking two arguments in addition to the standard ones.
11981 *
11982 * @param a_pfnCImpl The pointer to the C routine.
11983 * @param a0 The first extra argument.
11984 * @param a1 The second extra argument.
11985 */
11986#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11987
11988/**
11989 * Defers the rest of the instruction emulation to a C implementation routine
11990 * and returns, taking three arguments in addition to the standard ones.
11991 *
11992 * @param a_pfnCImpl The pointer to the C routine.
11993 * @param a0 The first extra argument.
11994 * @param a1 The second extra argument.
11995 * @param a2 The third extra argument.
11996 */
11997#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11998
11999/**
12000 * Defers the rest of the instruction emulation to a C implementation routine
12001 * and returns, taking four arguments in addition to the standard ones.
12002 *
12003 * @param a_pfnCImpl The pointer to the C routine.
12004 * @param a0 The first extra argument.
12005 * @param a1 The second extra argument.
12006 * @param a2 The third extra argument.
12007 * @param a3 The fourth extra argument.
12008 */
12009#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12010
12011/**
12012 * Defers the rest of the instruction emulation to a C implementation routine
12013 * and returns, taking five arguments in addition to the standard ones.
12014 *
12015 * @param a_pfnCImpl The pointer to the C routine.
12016 * @param a0 The first extra argument.
12017 * @param a1 The second extra argument.
12018 * @param a2 The third extra argument.
12019 * @param a3 The fourth extra argument.
12020 * @param a4 The fifth extra argument.
12021 */
12022#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
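/*
 * Illustrative use (fragment only; the surrounding decoding is omitted): once
 * the operands are decoded, a far jump hands the heavy lifting to its C worker
 * and returns that worker's status:
 *
 *     IEM_MC_CALL_CIMPL_3(iemCImpl_FarJmp, uSel, offSeg, enmEffOpSize);
 *
 * Since the macro contains a 'return', it must be the final statement of the
 * IEM_MC_BEGIN/IEM_MC_END block it appears in.
 */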
12023
12024/**
12025 * Defers the entire instruction emulation to a C implementation routine and
12026 * returns, only taking the standard parameters.
12027 *
12028 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12029 *
12030 * @param a_pfnCImpl The pointer to the C routine.
12031 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12032 */
12033#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12034
12035/**
12036 * Defers the entire instruction emulation to a C implementation routine and
12037 * returns, taking one argument in addition to the standard ones.
12038 *
12039 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12040 *
12041 * @param a_pfnCImpl The pointer to the C routine.
12042 * @param a0 The argument.
12043 */
12044#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12045
12046/**
12047 * Defers the entire instruction emulation to a C implementation routine and
12048 * returns, taking two arguments in addition to the standard ones.
12049 *
12050 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12051 *
12052 * @param a_pfnCImpl The pointer to the C routine.
12053 * @param a0 The first extra argument.
12054 * @param a1 The second extra argument.
12055 */
12056#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12057
12058/**
12059 * Defers the entire instruction emulation to a C implementation routine and
12060 * returns, taking three arguments in addition to the standard ones.
12061 *
12062 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12063 *
12064 * @param a_pfnCImpl The pointer to the C routine.
12065 * @param a0 The first extra argument.
12066 * @param a1 The second extra argument.
12067 * @param a2 The third extra argument.
12068 */
12069#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
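/*
 * Illustrative sketch: instructions emulated entirely in C skip the microcode
 * block altogether and defer straight from the opcode function, roughly:
 *
 *     FNIEMOP_DEF(iemOp_hlt)
 *     {
 *         IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *         return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 *     }
 */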
12070
12071/**
12072 * Calls a FPU assembly implementation taking one visible argument.
12073 *
12074 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12075 * @param a0 The first extra argument.
12076 */
12077#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12078 do { \
12079 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
12080 } while (0)
12081
12082/**
12083 * Calls a FPU assembly implementation taking two visible arguments.
12084 *
12085 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12086 * @param a0 The first extra argument.
12087 * @param a1 The second extra argument.
12088 */
12089#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12090 do { \
12091 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12092 } while (0)
12093
12094/**
12095 * Calls a FPU assembly implementation taking three visible arguments.
12096 *
12097 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12098 * @param a0 The first extra argument.
12099 * @param a1 The second extra argument.
12100 * @param a2 The third extra argument.
12101 */
12102#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12103 do { \
12104 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12105 } while (0)
12106
12107#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12108 do { \
12109 (a_FpuData).FSW = (a_FSW); \
12110 (a_FpuData).r80Result = *(a_pr80Value); \
12111 } while (0)
12112
12113/** Pushes FPU result onto the stack. */
12114#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12115 iemFpuPushResult(pVCpu, &a_FpuData)
12116/** Pushes FPU result onto the stack and sets the FPUDP. */
12117#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12118 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12119
12120/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
12121#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12122 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12123
12124/** Stores FPU result in a stack register. */
12125#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12126 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12127/** Stores FPU result in a stack register and pops the stack. */
12128#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12129 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12130/** Stores FPU result in a stack register and sets the FPUDP. */
12131#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12132 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12133/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12134 * stack. */
12135#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12136 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
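/*
 * Illustrative flow (fragment only; the argument setup via the IEM_MC_ARG*
 * machinery is omitted): a typical two-operand FPU arithmetic instruction runs
 * the assembly worker into an IEMFPURESULT and commits it with one of the
 * macros above:
 *
 *     IEM_MC_PREPARE_FPU_USAGE();
 *     IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *     IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 */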
12137
12138/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12139#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12140 iemFpuUpdateOpcodeAndIp(pVCpu)
12141/** Free a stack register (for FFREE and FFREEP). */
12142#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12143 iemFpuStackFree(pVCpu, a_iStReg)
12144/** Increment the FPU stack pointer. */
12145#define IEM_MC_FPU_STACK_INC_TOP() \
12146 iemFpuStackIncTop(pVCpu)
12147/** Decrement the FPU stack pointer. */
12148#define IEM_MC_FPU_STACK_DEC_TOP() \
12149 iemFpuStackDecTop(pVCpu)
12150
12151/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12152#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12153 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12154/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12155#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12156 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12157/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12158#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12159 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12160/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12161#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12162 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12163/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12164 * stack. */
12165#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12166 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12167/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12168#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12169 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12170
12171/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12172#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12173 iemFpuStackUnderflow(pVCpu, a_iStDst)
12174/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12175 * stack. */
12176#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12177 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12178/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12179 * FPUDS. */
12180#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12181 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12182/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12183 * FPUDS. Pops stack. */
12184#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12185 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12186/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12187 * stack twice. */
12188#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12189 iemFpuStackUnderflowThenPopPop(pVCpu)
12190/** Raises a FPU stack underflow exception for an instruction pushing a result
12191 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12192#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12193 iemFpuStackPushUnderflow(pVCpu)
12194/** Raises a FPU stack underflow exception for an instruction pushing a result
12195 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12196#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12197 iemFpuStackPushUnderflowTwo(pVCpu)
12198
12199/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12200 * FPUIP, FPUCS and FOP. */
12201#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12202 iemFpuStackPushOverflow(pVCpu)
12203/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12204 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12205#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12206 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12207/** Prepares for using the FPU state.
12208 * Ensures that we can use the host FPU in the current context (RC+R0).
12209 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12210#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12211/** Actualizes the guest FPU state so it can be accessed read-only fashion. */
12212#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12213/** Actualizes the guest FPU state so it can be accessed and modified. */
12214#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12215
12216/** Prepares for using the SSE state.
12217 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12218 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12219#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12220/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12221#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12222/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12223#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12224
12225/** Prepares for using the AVX state.
12226 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12227 * Ensures the guest AVX state in the CPUMCTX is up to date.
12228 * @note This will include the AVX512 state too when support for it is added
12229 * due to the zero-extending behaviour of VEX encoded instructions. */
12230#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12231/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12232#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12233/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12234#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12235
12236/**
12237 * Calls a MMX assembly implementation taking two visible arguments.
12238 *
12239 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12240 * @param a0 The first extra argument.
12241 * @param a1 The second extra argument.
12242 */
12243#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12244 do { \
12245 IEM_MC_PREPARE_FPU_USAGE(); \
12246 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12247 } while (0)
12248
12249/**
12250 * Calls a MMX assembly implementation taking three visible arguments.
12251 *
12252 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12253 * @param a0 The first extra argument.
12254 * @param a1 The second extra argument.
12255 * @param a2 The third extra argument.
12256 */
12257#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12258 do { \
12259 IEM_MC_PREPARE_FPU_USAGE(); \
12260 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12261 } while (0)
12262
12263
12264/**
12265 * Calls a SSE assembly implementation taking two visible arguments.
12266 *
12267 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12268 * @param a0 The first extra argument.
12269 * @param a1 The second extra argument.
12270 */
12271#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12272 do { \
12273 IEM_MC_PREPARE_SSE_USAGE(); \
12274 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12275 } while (0)
12276
12277/**
12278 * Calls a SSE assembly implementation taking three visible arguments.
12279 *
12280 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12281 * @param a0 The first extra argument.
12282 * @param a1 The second extra argument.
12283 * @param a2 The third extra argument.
12284 */
12285#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12286 do { \
12287 IEM_MC_PREPARE_SSE_USAGE(); \
12288 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12289 } while (0)
12290
12291
12292/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12293 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12294#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12295 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState), 0)
12296
12297/**
12298 * Calls a AVX assembly implementation taking two visible arguments.
12299 *
12300 * There is one implicit zero'th argument, a pointer to the extended state.
12301 *
12302 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12303 * @param a1 The first extra argument.
12304 * @param a2 The second extra argument.
12305 */
12306#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12307 do { \
12308 IEM_MC_PREPARE_AVX_USAGE(); \
12309 a_pfnAImpl(pXState, (a1), (a2)); \
12310 } while (0)
12311
12312/**
12313 * Calls a AVX assembly implementation taking three visible arguments.
12314 *
12315 * There is one implicit zero'th argument, a pointer to the extended state.
12316 *
12317 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12318 * @param a1 The first extra argument.
12319 * @param a2 The second extra argument.
12320 * @param a3 The third extra argument.
12321 */
12322#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12323 do { \
12324 IEM_MC_PREPARE_AVX_USAGE(); \
12325 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12326 } while (0)
12327
12328/** @note Not for IOPL or IF testing. */
12329#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
12330/** @note Not for IOPL or IF testing. */
12331#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
12332/** @note Not for IOPL or IF testing. */
12333#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
12334/** @note Not for IOPL or IF testing. */
12335#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
12336/** @note Not for IOPL or IF testing. */
12337#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12338 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12339 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12340/** @note Not for IOPL or IF testing. */
12341#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12342 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12343 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12344/** @note Not for IOPL or IF testing. */
12345#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12346 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12347 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12348 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12349/** @note Not for IOPL or IF testing. */
12350#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12351 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12352 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12353 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12354#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
12355#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
12356#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
12357/** @note Not for IOPL or IF testing. */
12358#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12359 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12360 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12361/** @note Not for IOPL or IF testing. */
12362#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12363 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12364 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12365/** @note Not for IOPL or IF testing. */
12366#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12367 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12368 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12369/** @note Not for IOPL or IF testing. */
12370#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12371 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12372 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12373/** @note Not for IOPL or IF testing. */
12374#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12375 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12376 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12377/** @note Not for IOPL or IF testing. */
12378#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12379 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12380 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12381#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12382#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12383
12384#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12385 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12386#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12387 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12388#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12389 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12390#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12391 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12392#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12393 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12394#define IEM_MC_IF_FCW_IM() \
12395 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12396
12397#define IEM_MC_ELSE() } else {
12398#define IEM_MC_ENDIF() } do {} while (0)
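/*
 * Illustrative sketch: the IF/ELSE/ENDIF pseudo statements expand to plain C
 * braces, so a conditional branch body reads naturally (fragment only):
 *
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *         IEM_MC_REL_JMP_S8(i8Imm);
 *     IEM_MC_ELSE()
 *         IEM_MC_ADVANCE_RIP();
 *     IEM_MC_ENDIF();
 */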
12399
12400/** @} */
12401
12402
12403/** @name Opcode Debug Helpers.
12404 * @{
12405 */
12406#ifdef VBOX_WITH_STATISTICS
12407# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12408#else
12409# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12410#endif
12411
12412#ifdef DEBUG
12413# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12414 do { \
12415 IEMOP_INC_STATS(a_Stats); \
12416 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
12417 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12418 } while (0)
12419
12420# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12421 do { \
12422 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12423 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12424 (void)RT_CONCAT(OP_,a_Upper); \
12425 (void)(a_fDisHints); \
12426 (void)(a_fIemHints); \
12427 } while (0)
12428
12429# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12430 do { \
12431 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12432 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12433 (void)RT_CONCAT(OP_,a_Upper); \
12434 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12435 (void)(a_fDisHints); \
12436 (void)(a_fIemHints); \
12437 } while (0)
12438
12439# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12440 do { \
12441 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12442 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12443 (void)RT_CONCAT(OP_,a_Upper); \
12444 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12445 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12446 (void)(a_fDisHints); \
12447 (void)(a_fIemHints); \
12448 } while (0)
12449
12450# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12451 do { \
12452 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12453 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12454 (void)RT_CONCAT(OP_,a_Upper); \
12455 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12456 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12457 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12458 (void)(a_fDisHints); \
12459 (void)(a_fIemHints); \
12460 } while (0)
12461
12462# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12463 do { \
12464 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12465 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12466 (void)RT_CONCAT(OP_,a_Upper); \
12467 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12468 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12469 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12470 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12471 (void)(a_fDisHints); \
12472 (void)(a_fIemHints); \
12473 } while (0)
12474
12475#else
12476# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12477
12478# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12479 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12480# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12481 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12482# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12483 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12484# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12485 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12486# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12487 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12488
12489#endif
12490
12491#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12492 IEMOP_MNEMONIC0EX(a_Lower, \
12493 #a_Lower, \
12494 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12495#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12496 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12497 #a_Lower " " #a_Op1, \
12498 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12499#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12500 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12501 #a_Lower " " #a_Op1 "," #a_Op2, \
12502 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12503#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12504 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12505 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12506 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12507#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12508 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12509 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12510 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
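/*
 * Illustrative use (operand forms shown for illustration): a decoder typically
 * announces itself with one of the wrappers above, e.g.
 *
 *     IEMOP_MNEMONIC2(RM, MOVZX, movzx, Gv, Eb, DISOPTYPE_HARMLESS, 0);
 *
 * which bumps the per-instruction statistics, emits the Log4 decode line in
 * debug builds, and lets the compiler verify that the IEMOPFORM_RM, OP_MOVZX
 * and OP_PARM_Gv/OP_PARM_Eb tokens actually exist.
 */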
12511
12512/** @} */
12513
12514
12515/** @name Opcode Helpers.
12516 * @{
12517 */
12518
12519#ifdef IN_RING3
12520# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12521 do { \
12522 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12523 else \
12524 { \
12525 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12526 return IEMOP_RAISE_INVALID_OPCODE(); \
12527 } \
12528 } while (0)
12529#else
12530# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12531 do { \
12532 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12533 else return IEMOP_RAISE_INVALID_OPCODE(); \
12534 } while (0)
12535#endif
12536
12537/** The instruction requires a 186 or later. */
12538#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12539# define IEMOP_HLP_MIN_186() do { } while (0)
12540#else
12541# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12542#endif
12543
12544/** The instruction requires a 286 or later. */
12545#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12546# define IEMOP_HLP_MIN_286() do { } while (0)
12547#else
12548# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12549#endif
12550
12551/** The instruction requires a 386 or later. */
12552#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12553# define IEMOP_HLP_MIN_386() do { } while (0)
12554#else
12555# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12556#endif
12557
12558/** The instruction requires a 386 or later if the given expression is true. */
12559#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12560# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12561#else
12562# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12563#endif
12564
12565/** The instruction requires a 486 or later. */
12566#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12567# define IEMOP_HLP_MIN_486() do { } while (0)
12568#else
12569# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12570#endif
12571
12572/** The instruction requires a Pentium (586) or later. */
12573#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12574# define IEMOP_HLP_MIN_586() do { } while (0)
12575#else
12576# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12577#endif
12578
12579/** The instruction requires a PentiumPro (686) or later. */
12580#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12581# define IEMOP_HLP_MIN_686() do { } while (0)
12582#else
12583# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12584#endif
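/*
 * Illustrative sketch: an instruction introduced with the 486 (CMPXCHG, for
 * instance) starts its decoder with
 *
 *     IEMOP_HLP_MIN_486();
 *
 * so a guest configured with an older target CPU gets the historically correct
 * #UD instead of the newer behaviour.
 */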
12585
12586
12587/** The instruction raises an \#UD in real and V8086 mode. */
12588#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12589 do \
12590 { \
12591 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12592 else return IEMOP_RAISE_INVALID_OPCODE(); \
12593 } while (0)
12594
12595/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12596 * 64-bit mode. */
12597#define IEMOP_HLP_NO_64BIT() \
12598 do \
12599 { \
12600 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12601 return IEMOP_RAISE_INVALID_OPCODE(); \
12602 } while (0)
12603
12604/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12605 * 64-bit mode. */
12606#define IEMOP_HLP_ONLY_64BIT() \
12607 do \
12608 { \
12609 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12610 return IEMOP_RAISE_INVALID_OPCODE(); \
12611 } while (0)
12612
12613/** The instruction defaults to 64-bit operand size in 64-bit mode. */
12614#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12615 do \
12616 { \
12617 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12618 iemRecalEffOpSize64Default(pVCpu); \
12619 } while (0)
12620
12621/** The instruction has 64-bit operand size in 64-bit mode. */
12622#define IEMOP_HLP_64BIT_OP_SIZE() \
12623 do \
12624 { \
12625 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12626 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12627 } while (0)
12628
12629/** Only a REX prefix immediately preceding the first opcode byte takes
12630 * effect. This macro helps ensure that, and it logs offending guest code. */
12631#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12632 do \
12633 { \
12634 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12635 { \
12636 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
12637 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
12638 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12639 pVCpu->iem.s.uRexB = 0; \
12640 pVCpu->iem.s.uRexIndex = 0; \
12641 pVCpu->iem.s.uRexReg = 0; \
12642 iemRecalEffOpSize(pVCpu); \
12643 } \
12644 } while (0)
12645
12646/**
12647 * Done decoding.
12648 */
12649#define IEMOP_HLP_DONE_DECODING() \
12650 do \
12651 { \
12652 /*nothing for now, maybe later... */ \
12653 } while (0)
12654
12655/**
12656 * Done decoding, raise \#UD exception if lock prefix present.
12657 */
12658#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12659 do \
12660 { \
12661 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12662 { /* likely */ } \
12663 else \
12664 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12665 } while (0)
12666
12667
12668/**
12669 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12670 * repnz or size prefixes are present, or if in real or v8086 mode.
12671 */
12672#define IEMOP_HLP_DONE_VEX_DECODING() \
12673 do \
12674 { \
12675 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12676 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12677 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12678 { /* likely */ } \
12679 else \
12680 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12681 } while (0)
12682
12683/**
12684 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12685 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not zero.
12686 */
12687#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12688 do \
12689 { \
12690 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12691 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12692 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12693 && pVCpu->iem.s.uVexLength == 0)) \
12694 { /* likely */ } \
12695 else \
12696 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12697 } while (0)
12698
12699
12700/**
12701 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12702 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12703 * register 0, or if in real or v8086 mode.
12704 */
12705#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12706 do \
12707 { \
12708 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12709 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12710 && !pVCpu->iem.s.uVex3rdReg \
12711 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12712 { /* likely */ } \
12713 else \
12714 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12715 } while (0)
12716
12717/**
12718 * Done decoding VEX, no V, L=0.
12719 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12720 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12721 */
12722#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12723 do \
12724 { \
12725 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12726 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12727 && pVCpu->iem.s.uVexLength == 0 \
12728 && pVCpu->iem.s.uVex3rdReg == 0 \
12729 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12730 { /* likely */ } \
12731 else \
12732 return IEMOP_RAISE_INVALID_OPCODE(); \
12733 } while (0)
12734
12735#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12736 do \
12737 { \
12738 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12739 { /* likely */ } \
12740 else \
12741 { \
12742 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12743 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12744 } \
12745 } while (0)
12746#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12747 do \
12748 { \
12749 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12750 { /* likely */ } \
12751 else \
12752 { \
12753 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12754 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12755 } \
12756 } while (0)
12757
12758/**
12759 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12760 * are present.
12761 */
12762#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12763 do \
12764 { \
12765 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12766 { /* likely */ } \
12767 else \
12768 return IEMOP_RAISE_INVALID_OPCODE(); \
12769 } while (0)
12770
12771
12772#ifdef VBOX_WITH_NESTED_HWVIRT
12773/** Checks for and handles SVM nested-guest control & instruction intercepts. */
12774# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
12775 do \
12776 { \
12777 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
12778 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
12779 } while (0)
12780
12781/** Checks for and handles SVM nested-guest CR read intercepts. */
12782# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \
12783 do \
12784 { \
12785 if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)) \
12786 IEM_RETURN_SVM_VMEXIT(a_pVCpu, SVM_EXIT_READ_CR0 + (a_uCr), a_uExitInfo1, a_uExitInfo2); \
12787 } while (0)
12788
12789#else /* !VBOX_WITH_NESTED_HWVIRT */
12790# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12791# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12792#endif /* !VBOX_WITH_NESTED_HWVIRT */
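/*
 * Illustrative sketch (placement is hypothetical; the constants are the usual
 * SVM ones): a decoder for an interceptable instruction checks the nested-guest
 * control intercept before doing any work, e.g.
 *
 *     IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RDTSC, SVM_EXIT_RDTSC, 0, 0);
 *
 * which turns into a #VMEXIT when the nested-guest VMCB sets that intercept bit
 * and compiles to nothing when nested hardware virtualization is disabled.
 */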
12793
12794
12795/**
12796 * Calculates the effective address of a ModR/M memory operand.
12797 *
12798 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12799 *
12800 * @return Strict VBox status code.
12801 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12802 * @param bRm The ModRM byte.
12803 * @param cbImm The size of any immediate following the
12804 * effective address opcode bytes. Important for
12805 * RIP relative addressing.
12806 * @param pGCPtrEff Where to return the effective address.
12807 */
12808IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12809{
12810 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12811 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12812# define SET_SS_DEF() \
12813 do \
12814 { \
12815 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12816 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12817 } while (0)
12818
12819 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12820 {
12821/** @todo Check the effective address size crap! */
12822 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12823 {
12824 uint16_t u16EffAddr;
12825
12826 /* Handle the disp16 form with no registers first. */
12827 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12828 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12829 else
12830 {
12831 /* Get the displacement. */
12832 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12833 {
12834 case 0: u16EffAddr = 0; break;
12835 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12836 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12837 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12838 }
12839
12840 /* Add the base and index registers to the disp. */
12841 switch (bRm & X86_MODRM_RM_MASK)
12842 {
12843 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12844 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12845 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12846 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12847 case 4: u16EffAddr += pCtx->si; break;
12848 case 5: u16EffAddr += pCtx->di; break;
12849 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12850 case 7: u16EffAddr += pCtx->bx; break;
12851 }
12852 }
12853
12854 *pGCPtrEff = u16EffAddr;
12855 }
12856 else
12857 {
12858 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12859 uint32_t u32EffAddr;
12860
12861 /* Handle the disp32 form with no registers first. */
12862 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12863 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12864 else
12865 {
12866 /* Get the register (or SIB) value. */
12867 switch ((bRm & X86_MODRM_RM_MASK))
12868 {
12869 case 0: u32EffAddr = pCtx->eax; break;
12870 case 1: u32EffAddr = pCtx->ecx; break;
12871 case 2: u32EffAddr = pCtx->edx; break;
12872 case 3: u32EffAddr = pCtx->ebx; break;
12873 case 4: /* SIB */
12874 {
12875 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12876
12877 /* Get the index and scale it. */
12878 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12879 {
12880 case 0: u32EffAddr = pCtx->eax; break;
12881 case 1: u32EffAddr = pCtx->ecx; break;
12882 case 2: u32EffAddr = pCtx->edx; break;
12883 case 3: u32EffAddr = pCtx->ebx; break;
12884 case 4: u32EffAddr = 0; /*none */ break;
12885 case 5: u32EffAddr = pCtx->ebp; break;
12886 case 6: u32EffAddr = pCtx->esi; break;
12887 case 7: u32EffAddr = pCtx->edi; break;
12888 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12889 }
12890 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12891
12892 /* add base */
12893 switch (bSib & X86_SIB_BASE_MASK)
12894 {
12895 case 0: u32EffAddr += pCtx->eax; break;
12896 case 1: u32EffAddr += pCtx->ecx; break;
12897 case 2: u32EffAddr += pCtx->edx; break;
12898 case 3: u32EffAddr += pCtx->ebx; break;
12899 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12900 case 5:
12901 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12902 {
12903 u32EffAddr += pCtx->ebp;
12904 SET_SS_DEF();
12905 }
12906 else
12907 {
12908 uint32_t u32Disp;
12909 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12910 u32EffAddr += u32Disp;
12911 }
12912 break;
12913 case 6: u32EffAddr += pCtx->esi; break;
12914 case 7: u32EffAddr += pCtx->edi; break;
12915 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12916 }
12917 break;
12918 }
12919 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12920 case 6: u32EffAddr = pCtx->esi; break;
12921 case 7: u32EffAddr = pCtx->edi; break;
12922 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12923 }
12924
12925 /* Get and add the displacement. */
12926 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12927 {
12928 case 0:
12929 break;
12930 case 1:
12931 {
12932 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12933 u32EffAddr += i8Disp;
12934 break;
12935 }
12936 case 2:
12937 {
12938 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12939 u32EffAddr += u32Disp;
12940 break;
12941 }
12942 default:
12943 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12944 }
12945
12946 }
12947 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12948 *pGCPtrEff = u32EffAddr;
12949 else
12950 {
12951 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12952 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12953 }
12954 }
12955 }
12956 else
12957 {
12958 uint64_t u64EffAddr;
12959
12960 /* Handle the rip+disp32 form with no registers first. */
12961 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12962 {
12963 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12964 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12965 }
12966 else
12967 {
12968 /* Get the register (or SIB) value. */
12969 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12970 {
12971 case 0: u64EffAddr = pCtx->rax; break;
12972 case 1: u64EffAddr = pCtx->rcx; break;
12973 case 2: u64EffAddr = pCtx->rdx; break;
12974 case 3: u64EffAddr = pCtx->rbx; break;
12975 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12976 case 6: u64EffAddr = pCtx->rsi; break;
12977 case 7: u64EffAddr = pCtx->rdi; break;
12978 case 8: u64EffAddr = pCtx->r8; break;
12979 case 9: u64EffAddr = pCtx->r9; break;
12980 case 10: u64EffAddr = pCtx->r10; break;
12981 case 11: u64EffAddr = pCtx->r11; break;
12982 case 13: u64EffAddr = pCtx->r13; break;
12983 case 14: u64EffAddr = pCtx->r14; break;
12984 case 15: u64EffAddr = pCtx->r15; break;
12985 /* SIB */
12986 case 4:
12987 case 12:
12988 {
12989 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12990
12991 /* Get the index and scale it. */
12992 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12993 {
12994 case 0: u64EffAddr = pCtx->rax; break;
12995 case 1: u64EffAddr = pCtx->rcx; break;
12996 case 2: u64EffAddr = pCtx->rdx; break;
12997 case 3: u64EffAddr = pCtx->rbx; break;
12998 case 4: u64EffAddr = 0; /*none */ break;
12999 case 5: u64EffAddr = pCtx->rbp; break;
13000 case 6: u64EffAddr = pCtx->rsi; break;
13001 case 7: u64EffAddr = pCtx->rdi; break;
13002 case 8: u64EffAddr = pCtx->r8; break;
13003 case 9: u64EffAddr = pCtx->r9; break;
13004 case 10: u64EffAddr = pCtx->r10; break;
13005 case 11: u64EffAddr = pCtx->r11; break;
13006 case 12: u64EffAddr = pCtx->r12; break;
13007 case 13: u64EffAddr = pCtx->r13; break;
13008 case 14: u64EffAddr = pCtx->r14; break;
13009 case 15: u64EffAddr = pCtx->r15; break;
13010 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13011 }
13012 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13013
13014 /* add base */
13015 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13016 {
13017 case 0: u64EffAddr += pCtx->rax; break;
13018 case 1: u64EffAddr += pCtx->rcx; break;
13019 case 2: u64EffAddr += pCtx->rdx; break;
13020 case 3: u64EffAddr += pCtx->rbx; break;
13021 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13022 case 6: u64EffAddr += pCtx->rsi; break;
13023 case 7: u64EffAddr += pCtx->rdi; break;
13024 case 8: u64EffAddr += pCtx->r8; break;
13025 case 9: u64EffAddr += pCtx->r9; break;
13026 case 10: u64EffAddr += pCtx->r10; break;
13027 case 11: u64EffAddr += pCtx->r11; break;
13028 case 12: u64EffAddr += pCtx->r12; break;
13029 case 14: u64EffAddr += pCtx->r14; break;
13030 case 15: u64EffAddr += pCtx->r15; break;
13031 /* complicated encodings */
13032 case 5:
13033 case 13:
13034 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13035 {
13036 if (!pVCpu->iem.s.uRexB)
13037 {
13038 u64EffAddr += pCtx->rbp;
13039 SET_SS_DEF();
13040 }
13041 else
13042 u64EffAddr += pCtx->r13;
13043 }
13044 else
13045 {
13046 uint32_t u32Disp;
13047 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13048 u64EffAddr += (int32_t)u32Disp;
13049 }
13050 break;
13051 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13052 }
13053 break;
13054 }
13055 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13056 }
13057
13058 /* Get and add the displacement. */
13059 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13060 {
13061 case 0:
13062 break;
13063 case 1:
13064 {
13065 int8_t i8Disp;
13066 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13067 u64EffAddr += i8Disp;
13068 break;
13069 }
13070 case 2:
13071 {
13072 uint32_t u32Disp;
13073 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13074 u64EffAddr += (int32_t)u32Disp;
13075 break;
13076 }
13077 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13078 }
13079
13080 }
13081
13082 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13083 *pGCPtrEff = u64EffAddr;
13084 else
13085 {
13086 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13087 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13088 }
13089 }
13090
13091 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13092 return VINF_SUCCESS;
13093}
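/*
 * Worked example (numbers for illustration): in 32-bit addressing, bRm=0x44
 * (mod=1, reg=0, rm=4) pulls in a SIB byte.  With SIB=0x88 (scale=4, index=ECX,
 * base=EAX) and a disp8 of 0x10, the code above computes
 *
 *     *pGCPtrEff = EAX + ECX * 4 + 0x10
 *
 * so EAX=0x1000 and ECX=0x20 yield 0x1090.  The 64-bit path works the same way,
 * except that REX.B/REX.X extend the register numbers and rip-relative
 * addressing takes the rm=5/mod=0 slot.
 */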
13094
13095
13096/**
13097 * Calculates the effective address of a ModR/M memory operand.
13098 *
13099 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13100 *
13101 * @return Strict VBox status code.
13102 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13103 * @param bRm The ModRM byte.
13104 * @param cbImm The size of any immediate following the
13105 * effective address opcode bytes. Important for
13106 * RIP relative addressing.
13107 * @param pGCPtrEff Where to return the effective address.
13108 * @param offRsp RSP displacement.
13109 */
13110IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13111{
13112 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13113 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13114# define SET_SS_DEF() \
13115 do \
13116 { \
13117 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13118 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13119 } while (0)
13120
13121 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13122 {
13123/** @todo Check the effective address size crap! */
13124 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13125 {
13126 uint16_t u16EffAddr;
13127
13128 /* Handle the disp16 form with no registers first. */
13129 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13130 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13131 else
13132 {
13133 /* Get the displacement. */
13134 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13135 {
13136 case 0: u16EffAddr = 0; break;
13137 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13138 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13139 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13140 }
13141
13142 /* Add the base and index registers to the disp. */
13143 switch (bRm & X86_MODRM_RM_MASK)
13144 {
13145 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13146 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13147 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13148 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13149 case 4: u16EffAddr += pCtx->si; break;
13150 case 5: u16EffAddr += pCtx->di; break;
13151 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13152 case 7: u16EffAddr += pCtx->bx; break;
13153 }
13154 }
13155
13156 *pGCPtrEff = u16EffAddr;
13157 }
13158 else
13159 {
13160 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13161 uint32_t u32EffAddr;
13162
13163 /* Handle the disp32 form with no registers first. */
13164 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13165 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13166 else
13167 {
13168 /* Get the register (or SIB) value. */
13169 switch ((bRm & X86_MODRM_RM_MASK))
13170 {
13171 case 0: u32EffAddr = pCtx->eax; break;
13172 case 1: u32EffAddr = pCtx->ecx; break;
13173 case 2: u32EffAddr = pCtx->edx; break;
13174 case 3: u32EffAddr = pCtx->ebx; break;
13175 case 4: /* SIB */
13176 {
13177 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13178
13179 /* Get the index and scale it. */
13180 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13181 {
13182 case 0: u32EffAddr = pCtx->eax; break;
13183 case 1: u32EffAddr = pCtx->ecx; break;
13184 case 2: u32EffAddr = pCtx->edx; break;
13185 case 3: u32EffAddr = pCtx->ebx; break;
13186 case 4: u32EffAddr = 0; /*none */ break;
13187 case 5: u32EffAddr = pCtx->ebp; break;
13188 case 6: u32EffAddr = pCtx->esi; break;
13189 case 7: u32EffAddr = pCtx->edi; break;
13190 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13191 }
13192 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13193
13194 /* add base */
13195 switch (bSib & X86_SIB_BASE_MASK)
13196 {
13197 case 0: u32EffAddr += pCtx->eax; break;
13198 case 1: u32EffAddr += pCtx->ecx; break;
13199 case 2: u32EffAddr += pCtx->edx; break;
13200 case 3: u32EffAddr += pCtx->ebx; break;
13201 case 4:
13202 u32EffAddr += pCtx->esp + offRsp;
13203 SET_SS_DEF();
13204 break;
13205 case 5:
13206 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13207 {
13208 u32EffAddr += pCtx->ebp;
13209 SET_SS_DEF();
13210 }
13211 else
13212 {
13213 uint32_t u32Disp;
13214 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13215 u32EffAddr += u32Disp;
13216 }
13217 break;
13218 case 6: u32EffAddr += pCtx->esi; break;
13219 case 7: u32EffAddr += pCtx->edi; break;
13220 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13221 }
13222 break;
13223 }
13224 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13225 case 6: u32EffAddr = pCtx->esi; break;
13226 case 7: u32EffAddr = pCtx->edi; break;
13227 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13228 }
13229
13230 /* Get and add the displacement. */
13231 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13232 {
13233 case 0:
13234 break;
13235 case 1:
13236 {
13237 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13238 u32EffAddr += i8Disp;
13239 break;
13240 }
13241 case 2:
13242 {
13243 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13244 u32EffAddr += u32Disp;
13245 break;
13246 }
13247 default:
13248 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13249 }
13250
13251 }
13252 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13253 *pGCPtrEff = u32EffAddr;
13254 else
13255 {
13256 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13257 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13258 }
13259 }
13260 }
13261 else
13262 {
13263 uint64_t u64EffAddr;
13264
13265 /* Handle the rip+disp32 form with no registers first. */
13266 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13267 {
13268 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13269 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13270 }
13271 else
13272 {
13273 /* Get the register (or SIB) value. */
13274 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13275 {
13276 case 0: u64EffAddr = pCtx->rax; break;
13277 case 1: u64EffAddr = pCtx->rcx; break;
13278 case 2: u64EffAddr = pCtx->rdx; break;
13279 case 3: u64EffAddr = pCtx->rbx; break;
13280 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13281 case 6: u64EffAddr = pCtx->rsi; break;
13282 case 7: u64EffAddr = pCtx->rdi; break;
13283 case 8: u64EffAddr = pCtx->r8; break;
13284 case 9: u64EffAddr = pCtx->r9; break;
13285 case 10: u64EffAddr = pCtx->r10; break;
13286 case 11: u64EffAddr = pCtx->r11; break;
13287 case 13: u64EffAddr = pCtx->r13; break;
13288 case 14: u64EffAddr = pCtx->r14; break;
13289 case 15: u64EffAddr = pCtx->r15; break;
13290 /* SIB */
13291 case 4:
13292 case 12:
13293 {
13294 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13295
13296 /* Get the index and scale it. */
13297 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13298 {
13299 case 0: u64EffAddr = pCtx->rax; break;
13300 case 1: u64EffAddr = pCtx->rcx; break;
13301 case 2: u64EffAddr = pCtx->rdx; break;
13302 case 3: u64EffAddr = pCtx->rbx; break;
13303 case 4: u64EffAddr = 0; /*none */ break;
13304 case 5: u64EffAddr = pCtx->rbp; break;
13305 case 6: u64EffAddr = pCtx->rsi; break;
13306 case 7: u64EffAddr = pCtx->rdi; break;
13307 case 8: u64EffAddr = pCtx->r8; break;
13308 case 9: u64EffAddr = pCtx->r9; break;
13309 case 10: u64EffAddr = pCtx->r10; break;
13310 case 11: u64EffAddr = pCtx->r11; break;
13311 case 12: u64EffAddr = pCtx->r12; break;
13312 case 13: u64EffAddr = pCtx->r13; break;
13313 case 14: u64EffAddr = pCtx->r14; break;
13314 case 15: u64EffAddr = pCtx->r15; break;
13315 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13316 }
13317 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13318
13319 /* add base */
13320 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13321 {
13322 case 0: u64EffAddr += pCtx->rax; break;
13323 case 1: u64EffAddr += pCtx->rcx; break;
13324 case 2: u64EffAddr += pCtx->rdx; break;
13325 case 3: u64EffAddr += pCtx->rbx; break;
13326 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
13327 case 6: u64EffAddr += pCtx->rsi; break;
13328 case 7: u64EffAddr += pCtx->rdi; break;
13329 case 8: u64EffAddr += pCtx->r8; break;
13330 case 9: u64EffAddr += pCtx->r9; break;
13331 case 10: u64EffAddr += pCtx->r10; break;
13332 case 11: u64EffAddr += pCtx->r11; break;
13333 case 12: u64EffAddr += pCtx->r12; break;
13334 case 14: u64EffAddr += pCtx->r14; break;
13335 case 15: u64EffAddr += pCtx->r15; break;
13336 /* complicated encodings */
13337 case 5:
13338 case 13:
13339 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13340 {
13341 if (!pVCpu->iem.s.uRexB)
13342 {
13343 u64EffAddr += pCtx->rbp;
13344 SET_SS_DEF();
13345 }
13346 else
13347 u64EffAddr += pCtx->r13;
13348 }
13349 else
13350 {
13351 uint32_t u32Disp;
13352 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13353 u64EffAddr += (int32_t)u32Disp;
13354 }
13355 break;
13356 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13357 }
13358 break;
13359 }
13360 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13361 }
13362
13363 /* Get and add the displacement. */
13364 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13365 {
13366 case 0:
13367 break;
13368 case 1:
13369 {
13370 int8_t i8Disp;
13371 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13372 u64EffAddr += i8Disp;
13373 break;
13374 }
13375 case 2:
13376 {
13377 uint32_t u32Disp;
13378 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13379 u64EffAddr += (int32_t)u32Disp;
13380 break;
13381 }
13382 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13383 }
13384
13385 }
13386
13387 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13388 *pGCPtrEff = u64EffAddr;
13389 else
13390 {
13391 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13392 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13393 }
13394 }
13395
13396 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13397 return VINF_SUCCESS;
13398}
13399
13400
13401#ifdef IEM_WITH_SETJMP
13402/**
13403 * Calculates the effective address of a ModR/M memory operand.
13404 *
13405 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13406 *
13407 * May longjmp on internal error.
13408 *
13409 * @return The effective address.
13410 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13411 * @param bRm The ModRM byte.
13412 * @param cbImm The size of any immediate following the
13413 * effective address opcode bytes. Important for
13414 * RIP relative addressing.
13415 */
13416IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13417{
13418 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
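    /* Decoding example (64-bit, no REX): bRm=0x44 -> mod=1, r/m=4 selects a SIB byte plus disp8;
       with bSib=0x8d (scale=4, index=RCX, base=RBP) the result is RBP + RCX*4 + disp8, with SS
       as the default segment. */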
13419 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13420# define SET_SS_DEF() \
13421 do \
13422 { \
13423 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13424 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13425 } while (0)
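    /* Note: SET_SS_DEF implements the architectural default that BP/EBP/RBP based addresses
       (and RSP/ESP SIB bases) use the SS segment unless an explicit segment override prefix
       was decoded. */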
13426
13427 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13428 {
13429/** @todo Check the effective address size crap! */
13430 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13431 {
13432 uint16_t u16EffAddr;
13433
13434 /* Handle the disp16 form with no registers first. */
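            /* (In 16-bit addressing, mod=0 with r/m=6 encodes a bare disp16, no base or index register.) */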
13435 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13436 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13437 else
13438 {
13439 /* Get the displacement. */
13440 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13441 {
13442 case 0: u16EffAddr = 0; break;
13443 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13444 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13445 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13446 }
13447
13448 /* Add the base and index registers to the disp. */
13449 switch (bRm & X86_MODRM_RM_MASK)
13450 {
13451 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13452 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13453 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13454 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13455 case 4: u16EffAddr += pCtx->si; break;
13456 case 5: u16EffAddr += pCtx->di; break;
13457 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13458 case 7: u16EffAddr += pCtx->bx; break;
13459 }
13460 }
13461
13462 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13463 return u16EffAddr;
13464 }
13465
13466 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13467 uint32_t u32EffAddr;
13468
13469 /* Handle the disp32 form with no registers first. */
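            /* (In 32-bit addressing, mod=0 with r/m=5 encodes a bare disp32, no base register.) */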
13470 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13471 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13472 else
13473 {
13474 /* Get the register (or SIB) value. */
13475 switch ((bRm & X86_MODRM_RM_MASK))
13476 {
13477 case 0: u32EffAddr = pCtx->eax; break;
13478 case 1: u32EffAddr = pCtx->ecx; break;
13479 case 2: u32EffAddr = pCtx->edx; break;
13480 case 3: u32EffAddr = pCtx->ebx; break;
13481 case 4: /* SIB */
13482 {
13483 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13484
13485 /* Get the index and scale it. */
13486 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13487 {
13488 case 0: u32EffAddr = pCtx->eax; break;
13489 case 1: u32EffAddr = pCtx->ecx; break;
13490 case 2: u32EffAddr = pCtx->edx; break;
13491 case 3: u32EffAddr = pCtx->ebx; break;
13492 case 4: u32EffAddr = 0; /*none */ break;
13493 case 5: u32EffAddr = pCtx->ebp; break;
13494 case 6: u32EffAddr = pCtx->esi; break;
13495 case 7: u32EffAddr = pCtx->edi; break;
13496 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13497 }
13498 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13499
13500 /* add base */
13501 switch (bSib & X86_SIB_BASE_MASK)
13502 {
13503 case 0: u32EffAddr += pCtx->eax; break;
13504 case 1: u32EffAddr += pCtx->ecx; break;
13505 case 2: u32EffAddr += pCtx->edx; break;
13506 case 3: u32EffAddr += pCtx->ebx; break;
13507 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
13508 case 5:
13509 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13510 {
13511 u32EffAddr += pCtx->ebp;
13512 SET_SS_DEF();
13513 }
13514 else
13515 {
13516 uint32_t u32Disp;
13517 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13518 u32EffAddr += u32Disp;
13519 }
13520 break;
13521 case 6: u32EffAddr += pCtx->esi; break;
13522 case 7: u32EffAddr += pCtx->edi; break;
13523 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13524 }
13525 break;
13526 }
13527 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13528 case 6: u32EffAddr = pCtx->esi; break;
13529 case 7: u32EffAddr = pCtx->edi; break;
13530 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13531 }
13532
13533 /* Get and add the displacement. */
13534 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13535 {
13536 case 0:
13537 break;
13538 case 1:
13539 {
13540 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13541 u32EffAddr += i8Disp;
13542 break;
13543 }
13544 case 2:
13545 {
13546 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13547 u32EffAddr += u32Disp;
13548 break;
13549 }
13550 default:
13551 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13552 }
13553 }
13554
13555 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13556 {
13557 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13558 return u32EffAddr;
13559 }
13560 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13561 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13562 return u32EffAddr & UINT16_MAX;
13563 }
13564
13565 uint64_t u64EffAddr;
13566
13567 /* Handle the rip+disp32 form with no registers first. */
13568 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13569 {
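            /* The disp32 is relative to the address of the next instruction, so the current
               instruction length and any trailing immediate bytes (cbImm) are added on top of RIP. */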
13570 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13571 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13572 }
13573 else
13574 {
13575 /* Get the register (or SIB) value. */
13576 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13577 {
13578 case 0: u64EffAddr = pCtx->rax; break;
13579 case 1: u64EffAddr = pCtx->rcx; break;
13580 case 2: u64EffAddr = pCtx->rdx; break;
13581 case 3: u64EffAddr = pCtx->rbx; break;
13582 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13583 case 6: u64EffAddr = pCtx->rsi; break;
13584 case 7: u64EffAddr = pCtx->rdi; break;
13585 case 8: u64EffAddr = pCtx->r8; break;
13586 case 9: u64EffAddr = pCtx->r9; break;
13587 case 10: u64EffAddr = pCtx->r10; break;
13588 case 11: u64EffAddr = pCtx->r11; break;
13589 case 13: u64EffAddr = pCtx->r13; break;
13590 case 14: u64EffAddr = pCtx->r14; break;
13591 case 15: u64EffAddr = pCtx->r15; break;
13592 /* SIB */
13593 case 4:
13594 case 12:
13595 {
13596 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13597
13598 /* Get the index and scale it. */
13599 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13600 {
13601 case 0: u64EffAddr = pCtx->rax; break;
13602 case 1: u64EffAddr = pCtx->rcx; break;
13603 case 2: u64EffAddr = pCtx->rdx; break;
13604 case 3: u64EffAddr = pCtx->rbx; break;
13605 case 4: u64EffAddr = 0; /*none */ break;
13606 case 5: u64EffAddr = pCtx->rbp; break;
13607 case 6: u64EffAddr = pCtx->rsi; break;
13608 case 7: u64EffAddr = pCtx->rdi; break;
13609 case 8: u64EffAddr = pCtx->r8; break;
13610 case 9: u64EffAddr = pCtx->r9; break;
13611 case 10: u64EffAddr = pCtx->r10; break;
13612 case 11: u64EffAddr = pCtx->r11; break;
13613 case 12: u64EffAddr = pCtx->r12; break;
13614 case 13: u64EffAddr = pCtx->r13; break;
13615 case 14: u64EffAddr = pCtx->r14; break;
13616 case 15: u64EffAddr = pCtx->r15; break;
13617 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13618 }
13619 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13620
13621 /* add base */
13622 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13623 {
13624 case 0: u64EffAddr += pCtx->rax; break;
13625 case 1: u64EffAddr += pCtx->rcx; break;
13626 case 2: u64EffAddr += pCtx->rdx; break;
13627 case 3: u64EffAddr += pCtx->rbx; break;
13628 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13629 case 6: u64EffAddr += pCtx->rsi; break;
13630 case 7: u64EffAddr += pCtx->rdi; break;
13631 case 8: u64EffAddr += pCtx->r8; break;
13632 case 9: u64EffAddr += pCtx->r9; break;
13633 case 10: u64EffAddr += pCtx->r10; break;
13634 case 11: u64EffAddr += pCtx->r11; break;
13635 case 12: u64EffAddr += pCtx->r12; break;
13636 case 14: u64EffAddr += pCtx->r14; break;
13637 case 15: u64EffAddr += pCtx->r15; break;
13638 /* complicated encodings */
13639 case 5:
13640 case 13:
13641 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13642 {
13643 if (!pVCpu->iem.s.uRexB)
13644 {
13645 u64EffAddr += pCtx->rbp;
13646 SET_SS_DEF();
13647 }
13648 else
13649 u64EffAddr += pCtx->r13;
13650 }
13651 else
13652 {
13653 uint32_t u32Disp;
13654 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13655 u64EffAddr += (int32_t)u32Disp;
13656 }
13657 break;
13658 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13659 }
13660 break;
13661 }
13662 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13663 }
13664
13665 /* Get and add the displacement. */
13666 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13667 {
13668 case 0:
13669 break;
13670 case 1:
13671 {
13672 int8_t i8Disp;
13673 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13674 u64EffAddr += i8Disp;
13675 break;
13676 }
13677 case 2:
13678 {
13679 uint32_t u32Disp;
13680 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13681 u64EffAddr += (int32_t)u32Disp;
13682 break;
13683 }
13684 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13685 }
13686
13687 }
13688
13689 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13690 {
13691 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13692 return u64EffAddr;
13693 }
13694 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13695 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13696 return u64EffAddr & UINT32_MAX;
13697}
13698#endif /* IEM_WITH_SETJMP */
13699
13700
13701/** @} */
13702
13703
13704
13705/*
13706 * Include the instructions
13707 */
13708#include "IEMAllInstructions.cpp.h"
13709
13710
13711
13712
13713#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13714
13715/**
13716 * Sets up execution verification mode.
13717 */
13718IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
13719{
13721 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
13722
13723 /*
13724 * Always note down the address of the current instruction.
13725 */
13726 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
13727 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
13728
13729 /*
13730 * Enable verification and/or logging.
13731 */
13732 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
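    /* Note: fNoRem == true means the lockstep comparison against REM/HM is off; enabling
       level 6 logging or hitting one of the trigger conditions below switches it on. */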
13733 if ( fNewNoRem
13734 && ( 0
13735#if 0 /* auto enable on first paged protected mode interrupt */
13736 || ( pOrgCtx->eflags.Bits.u1IF
13737 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
13738 && TRPMHasTrap(pVCpu)
13739 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
13740#endif
13741#if 0
13742 || ( pOrgCtx->cs.Sel == 0x10
13743 && ( pOrgCtx->rip == 0x90119e3e
13744 || pOrgCtx->rip == 0x901d9810))
13745#endif
13746#if 0 /* Auto enable DSL - FPU stuff. */
13747 || ( pOrgCtx->cs.Sel == 0x10
13748 && (// pOrgCtx->rip == 0xc02ec07f
13749 //|| pOrgCtx->rip == 0xc02ec082
13750 //|| pOrgCtx->rip == 0xc02ec0c9
13751 0
13752 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
13753#endif
13754#if 0 /* Auto enable DSL - fstp st0 stuff. */
13755 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
13756#endif
13757#if 0
13758 || pOrgCtx->rip == 0x9022bb3a
13759#endif
13760#if 0
13761 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
13762#endif
13763#if 0
13764 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
13765 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
13766#endif
13767#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
13768 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
13769 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
13770 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
13771#endif
13772#if 0 /* NT4SP1 - xadd early boot. */
13773 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
13774#endif
13775#if 0 /* NT4SP1 - wrmsr (intel MSR). */
13776 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
13777#endif
13778#if 0 /* NT4SP1 - cmpxchg (AMD). */
13779 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
13780#endif
13781#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
13782 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
13783#endif
13784#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
13785 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
13786
13787#endif
13788#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
13789 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
13790
13791#endif
13792#if 0 /* NT4SP1 - frstor [ecx] */
13793 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
13794#endif
13795#if 0 /* xxxxxx - All long mode code. */
13796 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
13797#endif
13798#if 0 /* rep movsq linux 3.7 64-bit boot. */
13799 || (pOrgCtx->rip == 0x0000000000100241)
13800#endif
13801#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
13802 || (pOrgCtx->rip == 0x000000000215e240)
13803#endif
13804#if 0 /* DOS's size-overridden iret to v8086. */
13805 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
13806#endif
13807 )
13808 )
13809 {
13810 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
13811 RTLogFlags(NULL, "enabled");
13812 fNewNoRem = false;
13813 }
13814 if (fNewNoRem != pVCpu->iem.s.fNoRem)
13815 {
13816 pVCpu->iem.s.fNoRem = fNewNoRem;
13817 if (!fNewNoRem)
13818 {
13819 LogAlways(("Enabling verification mode!\n"));
13820 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
13821 }
13822 else
13823 LogAlways(("Disabling verification mode!\n"));
13824 }
13825
13826 /*
13827 * Switch state.
13828 */
13829 if (IEM_VERIFICATION_ENABLED(pVCpu))
13830 {
13831 static CPUMCTX s_DebugCtx; /* Ugly! */
13832
13833 s_DebugCtx = *pOrgCtx;
13834 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
13835 }
13836
13837 /*
13838 * See if there is an interrupt pending in TRPM and inject it if we can.
13839 */
13840 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13841 /** @todo Maybe someday we can centralize this under CPUMCanInjectInterrupt()? */
13842#if defined(VBOX_WITH_NESTED_HWVIRT)
13843 bool fIntrEnabled = pOrgCtx->hwvirt.svm.fGif;
13844 if (fIntrEnabled)
13845 {
13846 if (CPUMIsGuestInSvmNestedHwVirtMode(pOrgCtx))
13847 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pOrgCtx);
13848 else
13849 fIntrEnabled = pOrgCtx->eflags.Bits.u1IF;
13850 }
13851#else
13852 bool fIntrEnabled = pOrgCtx->eflags.Bits.u1IF;
13853#endif
13854 if ( fIntrEnabled
13855 && TRPMHasTrap(pVCpu)
13856 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
13857 {
13858 uint8_t u8TrapNo;
13859 TRPMEVENT enmType;
13860 RTGCUINT uErrCode;
13861 RTGCPTR uCr2;
13862 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13863 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13864 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13865 TRPMResetTrap(pVCpu);
13866 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
13867 }
13868
13869 /*
13870 * Reset the counters.
13871 */
13872 pVCpu->iem.s.cIOReads = 0;
13873 pVCpu->iem.s.cIOWrites = 0;
13874 pVCpu->iem.s.fIgnoreRaxRdx = false;
13875 pVCpu->iem.s.fOverlappingMovs = false;
13876 pVCpu->iem.s.fProblematicMemory = false;
13877 pVCpu->iem.s.fUndefinedEFlags = 0;
13878
13879 if (IEM_VERIFICATION_ENABLED(pVCpu))
13880 {
13881 /*
13882 * Free all verification records.
13883 */
13884 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
13885 pVCpu->iem.s.pIemEvtRecHead = NULL;
13886 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
13887 do
13888 {
13889 while (pEvtRec)
13890 {
13891 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
13892 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
13893 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
13894 pEvtRec = pNext;
13895 }
13896 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
13897 pVCpu->iem.s.pOtherEvtRecHead = NULL;
13898 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
13899 } while (pEvtRec);
13900 }
13901}
13902
13903
13904/**
13905 * Allocate an event record.
13906 * @returns Pointer to a record.
13907 */
13908IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
13909{
13910 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13911 return NULL;
13912
13913 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
13914 if (pEvtRec)
13915 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
13916 else
13917 {
13918 if (!pVCpu->iem.s.ppIemEvtRecNext)
13919 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
13920
13921 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
13922 if (!pEvtRec)
13923 return NULL;
13924 }
13925 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
13926 pEvtRec->pNext = NULL;
13927 return pEvtRec;
13928}
13929
13930
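/* The IEMNotify* callbacks below are invoked by IOM when the other execution engine (REM/HM)
   performs I/O or MMIO accesses for the same instruction; they append records to the
   pOtherEvtRec list, which iemExecVerificationModeCheck later compares against IEM's own
   records. */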
13931/**
13932 * IOMMMIORead notification.
13933 */
13934VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
13935{
13936 PVMCPU pVCpu = VMMGetCpu(pVM);
13937 if (!pVCpu)
13938 return;
13939 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13940 if (!pEvtRec)
13941 return;
13942 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
13943 pEvtRec->u.RamRead.GCPhys = GCPhys;
13944 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
13945 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13946 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13947}
13948
13949
13950/**
13951 * IOMMMIOWrite notification.
13952 */
13953VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
13954{
13955 PVMCPU pVCpu = VMMGetCpu(pVM);
13956 if (!pVCpu)
13957 return;
13958 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13959 if (!pEvtRec)
13960 return;
13961 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
13962 pEvtRec->u.RamWrite.GCPhys = GCPhys;
13963 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
13964 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
13965 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
13966 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
13967 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
13968 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13969 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13970}
13971
13972
13973/**
13974 * IOMIOPortRead notification.
13975 */
13976VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
13977{
13978 PVMCPU pVCpu = VMMGetCpu(pVM);
13979 if (!pVCpu)
13980 return;
13981 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13982 if (!pEvtRec)
13983 return;
13984 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
13985 pEvtRec->u.IOPortRead.Port = Port;
13986 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
13987 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13988 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13989}
13990
13991/**
13992 * IOMIOPortWrite notification.
13993 */
13994VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13995{
13996 PVMCPU pVCpu = VMMGetCpu(pVM);
13997 if (!pVCpu)
13998 return;
13999 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14000 if (!pEvtRec)
14001 return;
14002 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
14003 pEvtRec->u.IOPortWrite.Port = Port;
14004 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
14005 pEvtRec->u.IOPortWrite.u32Value = u32Value;
14006 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14007 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14008}
14009
14010
14011VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
14012{
14013 PVMCPU pVCpu = VMMGetCpu(pVM);
14014 if (!pVCpu)
14015 return;
14016 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14017 if (!pEvtRec)
14018 return;
14019 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
14020 pEvtRec->u.IOPortStrRead.Port = Port;
14021 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
14022 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
14023 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14024 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14025}
14026
14027
14028VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
14029{
14030 PVMCPU pVCpu = VMMGetCpu(pVM);
14031 if (!pVCpu)
14032 return;
14033 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14034 if (!pEvtRec)
14035 return;
14036 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
14037 pEvtRec->u.IOPortStrWrite.Port = Port;
14038 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
14039 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
14040 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14041 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14042}
14043
14044
14045/**
14046 * Fakes and records an I/O port read.
14047 *
14048 * @returns VINF_SUCCESS.
14049 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14050 * @param Port The I/O port.
14051 * @param pu32Value Where to store the fake value.
14052 * @param cbValue The size of the access.
14053 */
14054IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14055{
14056 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14057 if (pEvtRec)
14058 {
14059 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
14060 pEvtRec->u.IOPortRead.Port = Port;
14061 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
14062 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
14063 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
14064 }
14065 pVCpu->iem.s.cIOReads++;
14066 *pu32Value = 0xcccccccc;
14067 return VINF_SUCCESS;
14068}
14069
14070
14071/**
14072 * Fakes and records an I/O port write.
14073 *
14074 * @returns VINF_SUCCESS.
14075 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14076 * @param Port The I/O port.
14077 * @param u32Value The value being written.
14078 * @param cbValue The size of the access.
14079 */
14080IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14081{
14082 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14083 if (pEvtRec)
14084 {
14085 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
14086 pEvtRec->u.IOPortWrite.Port = Port;
14087 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
14088 pEvtRec->u.IOPortWrite.u32Value = u32Value;
14089 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
14090 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
14091 }
14092 pVCpu->iem.s.cIOWrites++;
14093 return VINF_SUCCESS;
14094}
14095
14096
14097/**
14098 * Used to add extra details about a stub case.
14099 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14100 */
14101IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
14102{
14103 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14104 PVM pVM = pVCpu->CTX_SUFF(pVM);
14106 char szRegs[4096];
14107 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
14108 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
14109 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
14110 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
14111 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
14112 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
14113 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
14114 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
14115 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
14116 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
14117 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
14118 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
14119 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
14120 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
14121 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
14122 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
14123 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
14124 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
14125 " efer=%016VR{efer}\n"
14126 " pat=%016VR{pat}\n"
14127 " sf_mask=%016VR{sf_mask}\n"
14128 "krnl_gs_base=%016VR{krnl_gs_base}\n"
14129 " lstar=%016VR{lstar}\n"
14130 " star=%016VR{star} cstar=%016VR{cstar}\n"
14131 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
14132 );
14133
14134 char szInstr1[256];
14135 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
14136 DBGF_DISAS_FLAGS_DEFAULT_MODE,
14137 szInstr1, sizeof(szInstr1), NULL);
14138 char szInstr2[256];
14139 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
14140 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14141 szInstr2, sizeof(szInstr2), NULL);
14142
14143 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
14144}
14145
14146
14147/**
14148 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
14149 * dump to the assertion info.
14150 *
14151 * @param pEvtRec The record to dump.
14152 */
14153IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
14154{
14155 switch (pEvtRec->enmEvent)
14156 {
14157 case IEMVERIFYEVENT_IOPORT_READ:
14158 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
14159 pEvtRec->u.IOPortRead.Port,
14160 pEvtRec->u.IOPortRead.cbValue);
14161 break;
14162 case IEMVERIFYEVENT_IOPORT_WRITE:
14163 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
14164 pEvtRec->u.IOPortWrite.Port,
14165 pEvtRec->u.IOPortWrite.cbValue,
14166 pEvtRec->u.IOPortWrite.u32Value);
14167 break;
14168 case IEMVERIFYEVENT_IOPORT_STR_READ:
14169 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
14170 pEvtRec->u.IOPortStrRead.Port,
14171 pEvtRec->u.IOPortStrRead.cbValue,
14172 pEvtRec->u.IOPortStrRead.cTransfers);
14173 break;
14174 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14175 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
14176 pEvtRec->u.IOPortStrWrite.Port,
14177 pEvtRec->u.IOPortStrWrite.cbValue,
14178 pEvtRec->u.IOPortStrWrite.cTransfers);
14179 break;
14180 case IEMVERIFYEVENT_RAM_READ:
14181 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
14182 pEvtRec->u.RamRead.GCPhys,
14183 pEvtRec->u.RamRead.cb);
14184 break;
14185 case IEMVERIFYEVENT_RAM_WRITE:
14186 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
14187 pEvtRec->u.RamWrite.GCPhys,
14188 pEvtRec->u.RamWrite.cb,
14189 (int)pEvtRec->u.RamWrite.cb,
14190 pEvtRec->u.RamWrite.ab);
14191 break;
14192 default:
14193 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
14194 break;
14195 }
14196}
14197
14198
14199/**
14200 * Raises an assertion on the specified records, showing the given message with
14201 * record dumps attached.
14202 *
14203 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14204 * @param pEvtRec1 The first record.
14205 * @param pEvtRec2 The second record.
14206 * @param pszMsg The message explaining why we're asserting.
14207 */
14208IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
14209{
14210 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14211 iemVerifyAssertAddRecordDump(pEvtRec1);
14212 iemVerifyAssertAddRecordDump(pEvtRec2);
14213 iemVerifyAssertMsg2(pVCpu);
14214 RTAssertPanic();
14215}
14216
14217
14218/**
14219 * Raises an assertion on the specified record, showing the given message with
14220 * a record dump attached.
14221 *
14222 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14223 * @param pEvtRec1 The first record.
14224 * @param pEvtRec The record to dump.
14225 */
14226IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
14227{
14228 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14229 iemVerifyAssertAddRecordDump(pEvtRec);
14230 iemVerifyAssertMsg2(pVCpu);
14231 RTAssertPanic();
14232}
14233
14234
14235/**
14236 * Verifies a write record.
14237 *
14238 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14239 * @param pEvtRec The write record.
14240 * @param fRem Set if REM was doing the other execution. If clear
14241 * it was HM.
14242 */
14243IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
14244{
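    /* Re-read the guest memory IEM claims to have written and compare it with the recorded
       bytes; mismatches are filtered below for known benign cases (INS fakes, ROM/MMIO
       ranges and FXSAVE images) before asserting. */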
14245 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
14246 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
14247 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
14248 if ( RT_FAILURE(rc)
14249 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
14250 {
14251 /* fend off ins */
14252 if ( !pVCpu->iem.s.cIOReads
14253 || pEvtRec->u.RamWrite.ab[0] != 0xcc
14254 || ( pEvtRec->u.RamWrite.cb != 1
14255 && pEvtRec->u.RamWrite.cb != 2
14256 && pEvtRec->u.RamWrite.cb != 4) )
14257 {
14258 /* fend off ROMs and MMIO: ignore the legacy VGA/BIOS window (0xa0000-0xfffff) and the firmware mapping just below 4 GB (0xfffc0000 and up). */
14259 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
14260 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
14261 {
14262 /* fend off fxsave */
14263 if (pEvtRec->u.RamWrite.cb != 512)
14264 {
14265 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
14266 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14267 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
14268 RTAssertMsg2Add("%s: %.*Rhxs\n"
14269 "iem: %.*Rhxs\n",
14270 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
14271 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
14272 iemVerifyAssertAddRecordDump(pEvtRec);
14273 iemVerifyAssertMsg2(pVCpu);
14274 RTAssertPanic();
14275 }
14276 }
14277 }
14278 }
14279
14280}
14281
14282/**
14283 * Performs the post-execution verification checks.
14284 */
14285IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
14286{
14287 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14288 return rcStrictIem;
14289
14290 /*
14291 * Switch back the state.
14292 */
14293 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
14294 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
14295 Assert(pOrgCtx != pDebugCtx);
14296 IEM_GET_CTX(pVCpu) = pOrgCtx;
14297
14298 /*
14299 * Execute the instruction in REM.
14300 */
14301 bool fRem = false;
14302 PVM pVM = pVCpu->CTX_SUFF(pVM);
14304 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
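    /* When IEM_VERIFICATION_MODE_FULL_HM is defined, try to single-step the instruction with
       HM first; otherwise, or when HM punted with a ring-3 / I/O deferral status, fall back
       to letting REM emulate it. */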
14305#ifdef IEM_VERIFICATION_MODE_FULL_HM
14306 if ( HMIsEnabled(pVM)
14307 && pVCpu->iem.s.cIOReads == 0
14308 && pVCpu->iem.s.cIOWrites == 0
14309 && !pVCpu->iem.s.fProblematicMemory)
14310 {
14311 uint64_t uStartRip = pOrgCtx->rip;
14312 unsigned iLoops = 0;
14313 do
14314 {
14315 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
14316 iLoops++;
14317 } while ( rc == VINF_SUCCESS
14318 || ( rc == VINF_EM_DBG_STEPPED
14319 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14320 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
14321 || ( pOrgCtx->rip != pDebugCtx->rip
14322 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
14323 && iLoops < 8) );
14324 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
14325 rc = VINF_SUCCESS;
14326 }
14327#endif
14328 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
14329 || rc == VINF_IOM_R3_IOPORT_READ
14330 || rc == VINF_IOM_R3_IOPORT_WRITE
14331 || rc == VINF_IOM_R3_MMIO_READ
14332 || rc == VINF_IOM_R3_MMIO_READ_WRITE
14333 || rc == VINF_IOM_R3_MMIO_WRITE
14334 || rc == VINF_CPUM_R3_MSR_READ
14335 || rc == VINF_CPUM_R3_MSR_WRITE
14336 || rc == VINF_EM_RESCHEDULE
14337 )
14338 {
14339 EMRemLock(pVM);
14340 rc = REMR3EmulateInstruction(pVM, pVCpu);
14341 AssertRC(rc);
14342 EMRemUnlock(pVM);
14343 fRem = true;
14344 }
14345
14346# if 1 /* Skip unimplemented instructions for now. */
14347 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14348 {
14349 IEM_GET_CTX(pVCpu) = pOrgCtx;
14350 if (rc == VINF_EM_DBG_STEPPED)
14351 return VINF_SUCCESS;
14352 return rc;
14353 }
14354# endif
14355
14356 /*
14357 * Compare the register states.
14358 */
14359 unsigned cDiffs = 0;
14360 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
14361 {
14362 //Log(("REM and IEM ends up with different registers!\n"));
14363 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
14364
14365# define CHECK_FIELD(a_Field) \
14366 do \
14367 { \
14368 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14369 { \
14370 switch (sizeof(pOrgCtx->a_Field)) \
14371 { \
14372 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14373 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14374 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14375 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14376 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14377 } \
14378 cDiffs++; \
14379 } \
14380 } while (0)
14381# define CHECK_XSTATE_FIELD(a_Field) \
14382 do \
14383 { \
14384 if (pOrgXState->a_Field != pDebugXState->a_Field) \
14385 { \
14386 switch (sizeof(pOrgXState->a_Field)) \
14387 { \
14388 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14389 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14390 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14391 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14392 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14393 } \
14394 cDiffs++; \
14395 } \
14396 } while (0)
14397
14398# define CHECK_BIT_FIELD(a_Field) \
14399 do \
14400 { \
14401 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14402 { \
14403 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
14404 cDiffs++; \
14405 } \
14406 } while (0)
14407
14408# define CHECK_SEL(a_Sel) \
14409 do \
14410 { \
14411 CHECK_FIELD(a_Sel.Sel); \
14412 CHECK_FIELD(a_Sel.Attr.u); \
14413 CHECK_FIELD(a_Sel.u64Base); \
14414 CHECK_FIELD(a_Sel.u32Limit); \
14415 CHECK_FIELD(a_Sel.fFlags); \
14416 } while (0)
14417
14418 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
14419 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
14420
14421#if 1 /* The recompiler doesn't update these the intel way. */
14422 if (fRem)
14423 {
14424 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
14425 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
14426 pOrgXState->x87.CS = pDebugXState->x87.CS;
14427 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
14428 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
14429 pOrgXState->x87.DS = pDebugXState->x87.DS;
14430 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
14431 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
14432 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
14433 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
14434 }
14435#endif
14436 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
14437 {
14438 RTAssertMsg2Weak(" the FPU state differs\n");
14439 cDiffs++;
14440 CHECK_XSTATE_FIELD(x87.FCW);
14441 CHECK_XSTATE_FIELD(x87.FSW);
14442 CHECK_XSTATE_FIELD(x87.FTW);
14443 CHECK_XSTATE_FIELD(x87.FOP);
14444 CHECK_XSTATE_FIELD(x87.FPUIP);
14445 CHECK_XSTATE_FIELD(x87.CS);
14446 CHECK_XSTATE_FIELD(x87.Rsrvd1);
14447 CHECK_XSTATE_FIELD(x87.FPUDP);
14448 CHECK_XSTATE_FIELD(x87.DS);
14449 CHECK_XSTATE_FIELD(x87.Rsrvd2);
14450 CHECK_XSTATE_FIELD(x87.MXCSR);
14451 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
14452 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
14453 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
14454 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
14455 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
14456 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
14457 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
14458 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
14459 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
14460 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
14461 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
14462 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
14463 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
14464 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
14465 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
14466 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
14467 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
14468 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
14469 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
14470 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
14471 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
14472 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
14473 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
14474 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
14475 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
14476 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
14477 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
14478 }
14479 CHECK_FIELD(rip);
14480 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
14481 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
14482 {
14483 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
14484 CHECK_BIT_FIELD(rflags.Bits.u1CF);
14485 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
14486 CHECK_BIT_FIELD(rflags.Bits.u1PF);
14487 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
14488 CHECK_BIT_FIELD(rflags.Bits.u1AF);
14489 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
14490 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
14491 CHECK_BIT_FIELD(rflags.Bits.u1SF);
14492 CHECK_BIT_FIELD(rflags.Bits.u1TF);
14493 CHECK_BIT_FIELD(rflags.Bits.u1IF);
14494 CHECK_BIT_FIELD(rflags.Bits.u1DF);
14495 CHECK_BIT_FIELD(rflags.Bits.u1OF);
14496 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
14497 CHECK_BIT_FIELD(rflags.Bits.u1NT);
14498 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
14499 if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
14500 CHECK_BIT_FIELD(rflags.Bits.u1RF);
14501 CHECK_BIT_FIELD(rflags.Bits.u1VM);
14502 CHECK_BIT_FIELD(rflags.Bits.u1AC);
14503 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
14504 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
14505 CHECK_BIT_FIELD(rflags.Bits.u1ID);
14506 }
14507
14508 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
14509 CHECK_FIELD(rax);
14510 CHECK_FIELD(rcx);
14511 if (!pVCpu->iem.s.fIgnoreRaxRdx)
14512 CHECK_FIELD(rdx);
14513 CHECK_FIELD(rbx);
14514 CHECK_FIELD(rsp);
14515 CHECK_FIELD(rbp);
14516 CHECK_FIELD(rsi);
14517 CHECK_FIELD(rdi);
14518 CHECK_FIELD(r8);
14519 CHECK_FIELD(r9);
14520 CHECK_FIELD(r10);
14521 CHECK_FIELD(r11);
14522 CHECK_FIELD(r12);
14523 CHECK_FIELD(r13);
14524 CHECK_SEL(cs);
14525 CHECK_SEL(ss);
14526 CHECK_SEL(ds);
14527 CHECK_SEL(es);
14528 CHECK_SEL(fs);
14529 CHECK_SEL(gs);
14530 CHECK_FIELD(cr0);
14531
14532 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
14533 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
14534 /* Kludge #2: CR2 differs slightly on cross page boundary faults, we report the last address of the access
14535 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
14536 if (pOrgCtx->cr2 != pDebugCtx->cr2)
14537 {
14538 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
14539 { /* ignore */ }
14540 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
14541 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
14542 && fRem)
14543 { /* ignore */ }
14544 else
14545 CHECK_FIELD(cr2);
14546 }
14547 CHECK_FIELD(cr3);
14548 CHECK_FIELD(cr4);
14549 CHECK_FIELD(dr[0]);
14550 CHECK_FIELD(dr[1]);
14551 CHECK_FIELD(dr[2]);
14552 CHECK_FIELD(dr[3]);
14553 CHECK_FIELD(dr[6]);
14554 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
14555 CHECK_FIELD(dr[7]);
14556 CHECK_FIELD(gdtr.cbGdt);
14557 CHECK_FIELD(gdtr.pGdt);
14558 CHECK_FIELD(idtr.cbIdt);
14559 CHECK_FIELD(idtr.pIdt);
14560 CHECK_SEL(ldtr);
14561 CHECK_SEL(tr);
14562 CHECK_FIELD(SysEnter.cs);
14563 CHECK_FIELD(SysEnter.eip);
14564 CHECK_FIELD(SysEnter.esp);
14565 CHECK_FIELD(msrEFER);
14566 CHECK_FIELD(msrSTAR);
14567 CHECK_FIELD(msrPAT);
14568 CHECK_FIELD(msrLSTAR);
14569 CHECK_FIELD(msrCSTAR);
14570 CHECK_FIELD(msrSFMASK);
14571 CHECK_FIELD(msrKERNELGSBASE);
14572
14573 if (cDiffs != 0)
14574 {
14575 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14576 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
14577 RTAssertPanic();
14578 static bool volatile s_fEnterDebugger = true;
14579 if (s_fEnterDebugger)
14580 DBGFSTOP(pVM);
14581
14582# if 1 /* Ignore unimplemented instructions for now. */
14583 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14584 rcStrictIem = VINF_SUCCESS;
14585# endif
14586 }
14587# undef CHECK_FIELD
14588# undef CHECK_BIT_FIELD
14589 }
14590
14591 /*
14592 * If the register state compared fine, check the verification event
14593 * records.
14594 */
14595 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
14596 {
14597 /*
14598 * Compare verification event records.
14599 * - I/O port accesses should be a 1:1 match.
14600 */
14601 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
14602 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
14603 while (pIemRec && pOtherRec)
14604 {
14605 /* Since we might miss RAM writes and reads, ignore reads and verify
14606 that any written memory matches before skipping unmatched IEM RAM records. */
14607 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
14608 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
14609 && pIemRec->pNext)
14610 {
14611 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14612 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14613 pIemRec = pIemRec->pNext;
14614 }
14615
14616 /* Do the compare. */
14617 if (pIemRec->enmEvent != pOtherRec->enmEvent)
14618 {
14619 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
14620 break;
14621 }
14622 bool fEquals;
14623 switch (pIemRec->enmEvent)
14624 {
14625 case IEMVERIFYEVENT_IOPORT_READ:
14626 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
14627 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
14628 break;
14629 case IEMVERIFYEVENT_IOPORT_WRITE:
14630 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
14631 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
14632 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
14633 break;
14634 case IEMVERIFYEVENT_IOPORT_STR_READ:
14635 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
14636 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
14637 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
14638 break;
14639 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14640 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
14641 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
14642 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
14643 break;
14644 case IEMVERIFYEVENT_RAM_READ:
14645 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
14646 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
14647 break;
14648 case IEMVERIFYEVENT_RAM_WRITE:
14649 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
14650 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
14651 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
14652 break;
14653 default:
14654 fEquals = false;
14655 break;
14656 }
14657 if (!fEquals)
14658 {
14659 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
14660 break;
14661 }
14662
14663 /* advance */
14664 pIemRec = pIemRec->pNext;
14665 pOtherRec = pOtherRec->pNext;
14666 }
14667
14668 /* Ignore extra writes and reads. */
14669 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
14670 {
14671 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14672 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14673 pIemRec = pIemRec->pNext;
14674 }
14675 if (pIemRec != NULL)
14676 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
14677 else if (pOtherRec != NULL)
14678 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
14679 }
14680 IEM_GET_CTX(pVCpu) = pOrgCtx;
14681
14682 return rcStrictIem;
14683}
14684
14685#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14686
14687/* stubs */
14688IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14689{
14690 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
14691 return VERR_INTERNAL_ERROR;
14692}
14693
14694IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14695{
14696 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
14697 return VERR_INTERNAL_ERROR;
14698}
14699
14700#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14701
14702
14703#ifdef LOG_ENABLED
14704/**
14705 * Logs the current instruction.
14706 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14707 * @param pCtx The current CPU context.
14708 * @param fSameCtx Set if we have the same context information as the VMM,
14709 * clear if we may have already executed an instruction in
14710 * our debug context. When clear, we assume IEMCPU holds
14711 * valid CPU mode info.
14712 */
14713IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
14714{
14715# ifdef IN_RING3
14716 if (LogIs2Enabled())
14717 {
14718 char szInstr[256];
14719 uint32_t cbInstr = 0;
14720 if (fSameCtx)
14721 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
14722 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14723 szInstr, sizeof(szInstr), &cbInstr);
14724 else
14725 {
14726 uint32_t fFlags = 0;
14727 switch (pVCpu->iem.s.enmCpuMode)
14728 {
14729 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
14730 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
14731 case IEMMODE_16BIT:
14732 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
14733 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
14734 else
14735 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
14736 break;
14737 }
14738 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
14739 szInstr, sizeof(szInstr), &cbInstr);
14740 }
14741
14742 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
14743 Log2(("****\n"
14744 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
14745 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
14746 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
14747 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
14748 " %s\n"
14749 ,
14750 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
14751 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
14752 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
14753 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
14754 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
14755 szInstr));
14756
14757 if (LogIs3Enabled())
14758 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14759 }
14760 else
14761# endif
14762 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
14763 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
14764 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
14765}
14766#endif
14767
14768
14769/**
14770 * Makes status code adjustments (pass up from I/O and access handlers)
14771 * as well as maintaining statistics.
14772 *
14773 * @returns Strict VBox status code to pass up.
14774 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14775 * @param rcStrict The status from executing an instruction.
14776 */
14777DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14778{
14779 if (rcStrict != VINF_SUCCESS)
14780 {
14781 if (RT_SUCCESS(rcStrict))
14782 {
14783 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
14784 || rcStrict == VINF_IOM_R3_IOPORT_READ
14785 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
14786 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
14787 || rcStrict == VINF_IOM_R3_MMIO_READ
14788 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
14789 || rcStrict == VINF_IOM_R3_MMIO_WRITE
14790 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
14791 || rcStrict == VINF_CPUM_R3_MSR_READ
14792 || rcStrict == VINF_CPUM_R3_MSR_WRITE
14793 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
14794 || rcStrict == VINF_EM_RAW_TO_R3
14795 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
14796 /* raw-mode / virt handlers only: */
14797 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
14798 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
14799 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
14800 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
14801 || rcStrict == VINF_SELM_SYNC_GDT
14802 || rcStrict == VINF_CSAM_PENDING_ACTION
14803 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
14804 /* nested hw.virt codes: */
14805 || rcStrict == VINF_SVM_VMEXIT
14806 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
14807/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
14808 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
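                /* Apart from the SVM VM-exit special case below, a pending pass-up status replaces
                   rcStrict when it is not an EM scheduling status, or when it is a numerically
                   lower (i.e. more important) EM status than the one we already have; otherwise
                   it is only counted. */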
14809#ifdef VBOX_WITH_NESTED_HWVIRT
14810 if ( rcStrict == VINF_SVM_VMEXIT
14811 && rcPassUp == VINF_SUCCESS)
14812 rcStrict = VINF_SUCCESS;
14813 else
14814#endif
14815 if (rcPassUp == VINF_SUCCESS)
14816 pVCpu->iem.s.cRetInfStatuses++;
14817 else if ( rcPassUp < VINF_EM_FIRST
14818 || rcPassUp > VINF_EM_LAST
14819 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
14820 {
14821 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14822 pVCpu->iem.s.cRetPassUpStatus++;
14823 rcStrict = rcPassUp;
14824 }
14825 else
14826 {
14827 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14828 pVCpu->iem.s.cRetInfStatuses++;
14829 }
14830 }
14831 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
14832 pVCpu->iem.s.cRetAspectNotImplemented++;
14833 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14834 pVCpu->iem.s.cRetInstrNotImplemented++;
14835#ifdef IEM_VERIFICATION_MODE_FULL
14836 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
14837 rcStrict = VINF_SUCCESS;
14838#endif
14839 else
14840 pVCpu->iem.s.cRetErrStatuses++;
14841 }
14842 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
14843 {
14844 pVCpu->iem.s.cRetPassUpStatus++;
14845 rcStrict = pVCpu->iem.s.rcPassUp;
14846 }
14847
14848 return rcStrict;
14849}
14850
14851
14852/**
14853 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
14854 * IEMExecOneWithPrefetchedByPC.
14855 *
14856 * Similar code is found in IEMExecLots.
14857 *
14858 * @return Strict VBox status code.
14859 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14861 * @param fExecuteInhibit If set, execute the instruction following CLI,
14862 * POP SS and MOV SS,GR.
14863 */
14864DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
14865{
14866#ifdef IEM_WITH_SETJMP
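    /* With IEM_WITH_SETJMP the opcode fetchers signal failures by longjmp'ing back here rather
       than returning a strict status code, so wrap the instruction dispatch in a jump buffer. */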
14867 VBOXSTRICTRC rcStrict;
14868 jmp_buf JmpBuf;
14869 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14870 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14871 if ((rcStrict = setjmp(JmpBuf)) == 0)
14872 {
14873 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14874 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14875 }
14876 else
14877 pVCpu->iem.s.cLongJumps++;
14878 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14879#else
14880 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14881 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14882#endif
14883 if (rcStrict == VINF_SUCCESS)
14884 pVCpu->iem.s.cInstructions++;
14885 if (pVCpu->iem.s.cActiveMappings > 0)
14886 {
14887 Assert(rcStrict != VINF_SUCCESS);
14888 iemMemRollback(pVCpu);
14889 }
14890//#ifdef DEBUG
14891// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14892//#endif
14893
14894 /* Execute the next instruction as well if a cli, pop ss or
14895 mov ss, Gr has just completed successfully. */
14896 if ( fExecuteInhibit
14897 && rcStrict == VINF_SUCCESS
14898 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14899 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
14900 {
14901 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14902 if (rcStrict == VINF_SUCCESS)
14903 {
14904#ifdef LOG_ENABLED
14905 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
14906#endif
14907#ifdef IEM_WITH_SETJMP
14908 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14909 if ((rcStrict = setjmp(JmpBuf)) == 0)
14910 {
14911 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14912 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14913 }
14914 else
14915 pVCpu->iem.s.cLongJumps++;
14916 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14917#else
14918 IEM_OPCODE_GET_NEXT_U8(&b);
14919 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14920#endif
14921 if (rcStrict == VINF_SUCCESS)
14922 pVCpu->iem.s.cInstructions++;
14923 if (pVCpu->iem.s.cActiveMappings > 0)
14924 {
14925 Assert(rcStrict != VINF_SUCCESS);
14926 iemMemRollback(pVCpu);
14927 }
14928 }
14929 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
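    /* Presumably a dummy RIP value no guest will ever execute at, so the interrupt inhibit
       shadow is effectively dropped once the shadowed instruction has been handled. */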
14930 }
14931
14932 /*
14933 * Return value fiddling, statistics and sanity assertions.
14934 */
14935 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14936
14937 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14938 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14939#if defined(IEM_VERIFICATION_MODE_FULL)
14940 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14941 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14942 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14943 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14944#endif
14945 return rcStrict;
14946}
14947
14948
14949#ifdef IN_RC
14950/**
14951 * Re-enters raw-mode or ensure we return to ring-3.
14952 *
14953 * @returns rcStrict, maybe modified.
14954 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14955 * @param pCtx The current CPU context.
14956 * @param rcStrict The status code returned by the interpreter.
14957 */
14958DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
14959{
14960 if ( !pVCpu->iem.s.fInPatchCode
14961 && ( rcStrict == VINF_SUCCESS
14962 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14963 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14964 {
14965 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
14966 CPUMRawEnter(pVCpu);
14967 else
14968 {
14969 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
14970 rcStrict = VINF_EM_RESCHEDULE;
14971 }
14972 }
14973 return rcStrict;
14974}
14975#endif
14976
14977
14978/**
14979 * Execute one instruction.
14980 *
14981 * @return Strict VBox status code.
14982 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14983 */
14984VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14985{
14986#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14987 if (++pVCpu->iem.s.cVerifyDepth == 1)
14988 iemExecVerificationModeSetup(pVCpu);
14989#endif
14990#ifdef LOG_ENABLED
14991 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14992 iemLogCurInstr(pVCpu, pCtx, true);
14993#endif
14994
14995 /*
14996 * Do the decoding and emulation.
14997 */
14998 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14999 if (rcStrict == VINF_SUCCESS)
15000 rcStrict = iemExecOneInner(pVCpu, true);
15001
15002#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15003 /*
15004 * Assert some sanity.
15005 */
15006 if (pVCpu->iem.s.cVerifyDepth == 1)
15007 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
15008 pVCpu->iem.s.cVerifyDepth--;
15009#endif
15010#ifdef IN_RC
15011 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15012#endif
15013 if (rcStrict != VINF_SUCCESS)
15014 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15015 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15016 return rcStrict;
15017}
15018
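/*
 * A minimal, illustrative sketch of how a ring-3 caller might drive IEMExecOne;
 * the helper name, the step limit and the stop condition are assumptions, not
 * an actual VMM interface.
 */
static VBOXSTRICTRC demoSingleStepWithIem(PVMCPU pVCpu, uint32_t cMaxSteps)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    while (cMaxSteps-- > 0)
    {
        rcStrict = IEMExecOne(pVCpu);     /* Decodes and executes one guest instruction. */
        if (rcStrict != VINF_SUCCESS)     /* Informational and error codes both stop this demo loop. */
            break;
    }
    return rcStrict;
}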
15019
15020VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
15021{
15022 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15023 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15024
15025 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15026 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15027 if (rcStrict == VINF_SUCCESS)
15028 {
15029 rcStrict = iemExecOneInner(pVCpu, true);
15030 if (pcbWritten)
15031 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15032 }
15033
15034#ifdef IN_RC
15035 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15036#endif
15037 return rcStrict;
15038}
15039
15040
15041VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15042 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
15043{
15044 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15045 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15046
15047 VBOXSTRICTRC rcStrict;
15048 if ( cbOpcodeBytes
15049 && pCtx->rip == OpcodeBytesPC)
15050 {
15051 iemInitDecoder(pVCpu, false);
15052#ifdef IEM_WITH_CODE_TLB
15053 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15054 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15055 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15056 pVCpu->iem.s.offCurInstrStart = 0;
15057 pVCpu->iem.s.offInstrNextByte = 0;
15058#else
15059 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15060 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15061#endif
15062 rcStrict = VINF_SUCCESS;
15063 }
15064 else
15065 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15066 if (rcStrict == VINF_SUCCESS)
15067 {
15068 rcStrict = iemExecOneInner(pVCpu, true);
15069 }
15070
15071#ifdef IN_RC
15072 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15073#endif
15074 return rcStrict;
15075}
15076
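/*
 * An illustrative sketch of feeding already-fetched opcode bytes to
 * IEMExecOneWithPrefetchedByPC. The helper name and the byte buffer are
 * hypothetical; as the code above shows, the buffer is only used when it
 * matches the current RIP, otherwise a normal prefetch is done.
 */
static VBOXSTRICTRC demoExecPrefetched(PVMCPU pVCpu)
{
    PCPUMCTX      pCtx       = IEM_GET_CTX(pVCpu);
    uint8_t const abBytes[2] = { 0x0f, 0xa2 };           /* Example bytes only (cpuid). */
    return IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pCtx->rip,
                                        abBytes, sizeof(abBytes));
}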
15077
15078VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
15079{
15080 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15081 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15082
15083 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15084 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15085 if (rcStrict == VINF_SUCCESS)
15086 {
15087 rcStrict = iemExecOneInner(pVCpu, false);
15088 if (pcbWritten)
15089 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15090 }
15091
15092#ifdef IN_RC
15093 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15094#endif
15095 return rcStrict;
15096}
15097
15098
15099VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15100 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
15101{
15102 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15103 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15104
15105 VBOXSTRICTRC rcStrict;
15106 if ( cbOpcodeBytes
15107 && pCtx->rip == OpcodeBytesPC)
15108 {
15109 iemInitDecoder(pVCpu, true);
15110#ifdef IEM_WITH_CODE_TLB
15111 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15112 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15113 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15114 pVCpu->iem.s.offCurInstrStart = 0;
15115 pVCpu->iem.s.offInstrNextByte = 0;
15116#else
15117 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15118 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15119#endif
15120 rcStrict = VINF_SUCCESS;
15121 }
15122 else
15123 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15124 if (rcStrict == VINF_SUCCESS)
15125 rcStrict = iemExecOneInner(pVCpu, false);
15126
15127#ifdef IN_RC
15128 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15129#endif
15130 return rcStrict;
15131}
15132
15133
15134/**
15135 * For debugging DISGetParamSize; may come in handy.
15136 *
15137 * @returns Strict VBox status code.
15138 * @param pVCpu The cross context virtual CPU structure of the
15139 * calling EMT.
15140 * @param pCtxCore The context core structure.
15141 * @param OpcodeBytesPC The PC of the opcode bytes.
15142 * @param pvOpcodeBytes Prefetched opcode bytes.
15143 * @param cbOpcodeBytes Number of prefetched bytes.
15144 * @param pcbWritten Where to return the number of bytes written.
15145 * Optional.
15146 */
15147VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15148 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
15149 uint32_t *pcbWritten)
15150{
15151 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15152 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15153
15154 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15155 VBOXSTRICTRC rcStrict;
15156 if ( cbOpcodeBytes
15157 && pCtx->rip == OpcodeBytesPC)
15158 {
15159 iemInitDecoder(pVCpu, true);
15160#ifdef IEM_WITH_CODE_TLB
15161 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15162 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15163 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15164 pVCpu->iem.s.offCurInstrStart = 0;
15165 pVCpu->iem.s.offInstrNextByte = 0;
15166#else
15167 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15168 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15169#endif
15170 rcStrict = VINF_SUCCESS;
15171 }
15172 else
15173 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15174 if (rcStrict == VINF_SUCCESS)
15175 {
15176 rcStrict = iemExecOneInner(pVCpu, false);
15177 if (pcbWritten)
15178 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15179 }
15180
15181#ifdef IN_RC
15182 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15183#endif
15184 return rcStrict;
15185}
15186
15187
15188VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
15189{
15190 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
15191
15192#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15193 /*
15194 * See if there is an interrupt pending in TRPM, inject it if we can.
15195 */
15196 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15197# ifdef IEM_VERIFICATION_MODE_FULL
15198 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15199# endif
15200
15201 /** @todo Maybe someday we can centralize this under CPUMCanInjectInterrupt()? */
15202#if defined(VBOX_WITH_NESTED_HWVIRT)
15203 bool fIntrEnabled = pCtx->hwvirt.svm.fGif;
15204 if (fIntrEnabled)
15205 {
15206 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
15207 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pCtx);
15208 else
15209 fIntrEnabled = pCtx->eflags.Bits.u1IF;
15210 }
15211#else
15212 bool fIntrEnabled = pCtx->eflags.Bits.u1IF;
15213#endif
15214 if ( fIntrEnabled
15215 && TRPMHasTrap(pVCpu)
15216 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15217 {
15218 uint8_t u8TrapNo;
15219 TRPMEVENT enmType;
15220 RTGCUINT uErrCode;
15221 RTGCPTR uCr2;
15222 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15223 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15224 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15225 TRPMResetTrap(pVCpu);
15226 }
15227
15228 /*
15229 * Log the state.
15230 */
15231# ifdef LOG_ENABLED
15232 iemLogCurInstr(pVCpu, pCtx, true);
15233# endif
15234
15235 /*
15236 * Do the decoding and emulation.
15237 */
15238 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15239 if (rcStrict == VINF_SUCCESS)
15240 rcStrict = iemExecOneInner(pVCpu, true);
15241
15242 /*
15243 * Assert some sanity.
15244 */
15245 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
15246
15247 /*
15248 * Log and return.
15249 */
15250 if (rcStrict != VINF_SUCCESS)
15251 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15252 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15253 if (pcInstructions)
15254 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15255 return rcStrict;
15256
15257#else /* Not verification mode */
15258
15259 /*
15260 * See if there is an interrupt pending in TRPM, inject it if we can.
15261 */
15262 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15263# ifdef IEM_VERIFICATION_MODE_FULL
15264 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15265# endif
15266
15267 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
15268#if defined(VBOX_WITH_NESTED_HWVIRT)
15269 bool fIntrEnabled = pCtx->hwvirt.svm.fGif;
15270 if (fIntrEnabled)
15271 {
15272 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
15273 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pCtx);
15274 else
15275 fIntrEnabled = pCtx->eflags.Bits.u1IF;
15276 }
15277#else
15278 bool fIntrEnabled = pCtx->eflags.Bits.u1IF;
15279#endif
15280 if ( fIntrEnabled
15281 && TRPMHasTrap(pVCpu)
15282 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15283 {
15284 uint8_t u8TrapNo;
15285 TRPMEVENT enmType;
15286 RTGCUINT uErrCode;
15287 RTGCPTR uCr2;
15288 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15289 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15290 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15291 TRPMResetTrap(pVCpu);
15292 }
15293
15294 /*
15295 * Initial decoder init w/ prefetch, then setup setjmp.
15296 */
15297 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15298 if (rcStrict == VINF_SUCCESS)
15299 {
15300# ifdef IEM_WITH_SETJMP
15301 jmp_buf JmpBuf;
15302 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
15303 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
15304 pVCpu->iem.s.cActiveMappings = 0;
15305 if ((rcStrict = setjmp(JmpBuf)) == 0)
15306# endif
15307 {
15308 /*
15309 * The run loop. We limit ourselves to 4096 instructions right now.
15310 */
15311 PVM pVM = pVCpu->CTX_SUFF(pVM);
15312 uint32_t cInstr = 4096;
15313 for (;;)
15314 {
15315 /*
15316 * Log the state.
15317 */
15318# ifdef LOG_ENABLED
15319 iemLogCurInstr(pVCpu, pCtx, true);
15320# endif
15321
15322 /*
15323 * Do the decoding and emulation.
15324 */
15325 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
15326 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
15327 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15328 {
15329 Assert(pVCpu->iem.s.cActiveMappings == 0);
15330 pVCpu->iem.s.cInstructions++;
15331 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
15332 {
15333 uint32_t fCpu = pVCpu->fLocalForcedActions
15334 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
15335 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
15336 | VMCPU_FF_TLB_FLUSH
15337# ifdef VBOX_WITH_RAW_MODE
15338 | VMCPU_FF_TRPM_SYNC_IDT
15339 | VMCPU_FF_SELM_SYNC_TSS
15340 | VMCPU_FF_SELM_SYNC_GDT
15341 | VMCPU_FF_SELM_SYNC_LDT
15342# endif
15343 | VMCPU_FF_INHIBIT_INTERRUPTS
15344 | VMCPU_FF_BLOCK_NMIS
15345 | VMCPU_FF_UNHALT ));
15346
15347 if (RT_LIKELY( ( !fCpu
15348 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
15349 && !pCtx->rflags.Bits.u1IF) )
15350 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
15351 {
15352 if (cInstr-- > 0)
15353 {
15354 Assert(pVCpu->iem.s.cActiveMappings == 0);
15355 iemReInitDecoder(pVCpu);
15356 continue;
15357 }
15358 }
15359 }
15360 Assert(pVCpu->iem.s.cActiveMappings == 0);
15361 }
15362 else if (pVCpu->iem.s.cActiveMappings > 0)
15363 iemMemRollback(pVCpu);
15364 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15365 break;
15366 }
15367 }
15368# ifdef IEM_WITH_SETJMP
15369 else
15370 {
15371 if (pVCpu->iem.s.cActiveMappings > 0)
15372 iemMemRollback(pVCpu);
15373 pVCpu->iem.s.cLongJumps++;
15374# ifdef VBOX_WITH_NESTED_HWVIRT
15375 /*
15376 * When a nested-guest causes an exception intercept when fetching memory
15377 * (e.g. IEM_MC_FETCH_MEM_U16) as part of instruction execution, we need this
15378 * to fix-up VINF_SVM_VMEXIT on the longjmp way out, otherwise we will guru.
15379 */
15380 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15381# endif
15382 }
15383 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
15384# endif
15385
15386 /*
15387 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
15388 */
15389 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
15390 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
15391# if defined(IEM_VERIFICATION_MODE_FULL)
15392 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
15393 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
15394 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
15395 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
15396# endif
15397 }
15398# ifdef VBOX_WITH_NESTED_HWVIRT
15399 else
15400 {
15401 /*
15402 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
15403 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
15404 */
15405 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15406 }
15407# endif
15408
15409 /*
15410 * Maybe re-enter raw-mode and log.
15411 */
15412# ifdef IN_RC
15413 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15414# endif
15415 if (rcStrict != VINF_SUCCESS)
15416 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15417 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15418 if (pcInstructions)
15419 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15420 return rcStrict;
15421#endif /* Not verification mode */
15422}
15423
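/*
 * An illustrative sketch of how an execution-manager loop might use IEMExecLots
 * and its optional instruction count. The helper name and the caller-side
 * statistics variable are assumptions.
 */
static VBOXSTRICTRC demoRunBatch(PVMCPU pVCpu, uint64_t *pcTotalInstructions)
{
    uint32_t     cInstructions = 0;
    VBOXSTRICTRC rcStrict      = IEMExecLots(pVCpu, &cInstructions);
    *pcTotalInstructions += cInstructions;   /* The count is written on success and failure paths alike. */
    return rcStrict;                         /* VINF_SUCCESS or an EM scheduling / error status. */
}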
15424
15425
15426/**
15427 * Injects a trap, fault, abort, software interrupt or external interrupt.
15428 *
15429 * The parameter list matches TRPMQueryTrapAll pretty closely.
15430 *
15431 * @returns Strict VBox status code.
15432 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15433 * @param u8TrapNo The trap number.
15434 * @param enmType What type is it (trap/fault/abort), software
15435 * interrupt or hardware interrupt.
15436 * @param uErrCode The error code if applicable.
15437 * @param uCr2 The CR2 value if applicable.
15438 * @param cbInstr The instruction length (only relevant for
15439 * software interrupts).
15440 */
15441VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
15442 uint8_t cbInstr)
15443{
15444 iemInitDecoder(pVCpu, false);
15445#ifdef DBGFTRACE_ENABLED
15446 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
15447 u8TrapNo, enmType, uErrCode, uCr2);
15448#endif
15449
15450 uint32_t fFlags;
15451 switch (enmType)
15452 {
15453 case TRPM_HARDWARE_INT:
15454 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
15455 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
15456 uErrCode = uCr2 = 0;
15457 break;
15458
15459 case TRPM_SOFTWARE_INT:
15460 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
15461 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
15462 uErrCode = uCr2 = 0;
15463 break;
15464
15465 case TRPM_TRAP:
15466 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
15467 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
15468 if (u8TrapNo == X86_XCPT_PF)
15469 fFlags |= IEM_XCPT_FLAGS_CR2;
15470 switch (u8TrapNo)
15471 {
15472 case X86_XCPT_DF:
15473 case X86_XCPT_TS:
15474 case X86_XCPT_NP:
15475 case X86_XCPT_SS:
15476 case X86_XCPT_PF:
15477 case X86_XCPT_AC:
15478 fFlags |= IEM_XCPT_FLAGS_ERR;
15479 break;
15480
15481 case X86_XCPT_NMI:
15482 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
15483 break;
15484 }
15485 break;
15486
15487 IEM_NOT_REACHED_DEFAULT_CASE_RET();
15488 }
15489
15490 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
15491}
15492
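/*
 * An illustrative example of the flag selection above for a page fault: for
 * X86_XCPT_PF both the error code and CR2 are consumed. The helper name and
 * the error code / faulting address values are made up for the sketch; the
 * X86_TRAP_PF_XXX bits are assumed from the x86 header.
 */
static VBOXSTRICTRC demoInjectPageFault(PVMCPU pVCpu)
{
    uint16_t const uErrCode = X86_TRAP_PF_P | X86_TRAP_PF_RW;   /* Example: present + write. */
    RTGCPTR  const uCr2     = 0xdeadbeef;                       /* Example faulting address. */
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, uCr2, 0 /*cbInstr*/);
}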
15493
15494/**
15495 * Injects the active TRPM event.
15496 *
15497 * @returns Strict VBox status code.
15498 * @param pVCpu The cross context virtual CPU structure.
15499 */
15500VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
15501{
15502#ifndef IEM_IMPLEMENTS_TASKSWITCH
15503 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
15504#else
15505 uint8_t u8TrapNo;
15506 TRPMEVENT enmType;
15507 RTGCUINT uErrCode;
15508 RTGCUINTPTR uCr2;
15509 uint8_t cbInstr;
15510 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
15511 if (RT_FAILURE(rc))
15512 return rc;
15513
15514 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
15515
15516 /** @todo Are there any other codes that imply the event was successfully
15517 * delivered to the guest? See @bugref{6607}. */
15518 if ( rcStrict == VINF_SUCCESS
15519 || rcStrict == VINF_IEM_RAISED_XCPT)
15520 {
15521 TRPMResetTrap(pVCpu);
15522 }
15523 return rcStrict;
15524#endif
15525}
15526
15527
15528VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
15529{
15530 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15531 return VERR_NOT_IMPLEMENTED;
15532}
15533
15534
15535VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
15536{
15537 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15538 return VERR_NOT_IMPLEMENTED;
15539}
15540
15541
15542#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
15543/**
15544 * Executes an IRET instruction with the default operand size.
15545 *
15546 * This is for PATM.
15547 *
15548 * @returns VBox status code.
15549 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15550 * @param pCtxCore The register frame.
15551 */
15552VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
15553{
15554 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15555
15556 iemCtxCoreToCtx(pCtx, pCtxCore);
15557 iemInitDecoder(pVCpu);
15558 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
15559 if (rcStrict == VINF_SUCCESS)
15560 iemCtxToCtxCore(pCtxCore, pCtx);
15561 else
15562 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15563 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15564 return rcStrict;
15565}
15566#endif
15567
15568
15569/**
15570 * Macro used by the IEMExec* methods to check the given instruction length.
15571 *
15572 * Will return on failure!
15573 *
15574 * @param a_cbInstr The given instruction length.
15575 * @param a_cbMin The minimum length.
15576 */
15577#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
15578 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
15579 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
15580
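/*
 * The single unsigned comparison in the macro above is a range check in
 * disguise: after subtracting a_cbMin, any length below the minimum wraps
 * around to a huge value, so one compare covers both bounds
 * (a_cbMin <= a_cbInstr <= 15). Illustrative sketch with a hypothetical
 * helper name:
 */
static bool demoIsValidInstrLen(unsigned cbInstr, unsigned cbMin)
{
    /* Equivalent to: cbMin <= cbInstr && cbInstr <= 15, in one unsigned compare. */
    return cbInstr - cbMin <= 15u - cbMin;
}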
15581
15582/**
15583 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
15584 *
15585 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
15586 *
15587 * @returns Fiddled strict VBox status code, ready to return to a non-IEM caller.
15588 * @param pVCpu The cross context virtual CPU structure of the calling thread.
15589 * @param rcStrict The status code to fiddle.
15590 */
15591DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15592{
15593 iemUninitExec(pVCpu);
15594#ifdef IN_RC
15595 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
15596 iemExecStatusCodeFiddling(pVCpu, rcStrict));
15597#else
15598 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15599#endif
15600}
15601
15602
15603/**
15604 * Interface for HM and EM for executing string I/O OUT (write) instructions.
15605 *
15606 * This API ASSUMES that the caller has already verified that the guest code is
15607 * allowed to access the I/O port. (The I/O port is in the DX register in the
15608 * guest state.)
15609 *
15610 * @returns Strict VBox status code.
15611 * @param pVCpu The cross context virtual CPU structure.
15612 * @param cbValue The size of the I/O port access (1, 2, or 4).
15613 * @param enmAddrMode The addressing mode.
15614 * @param fRepPrefix Indicates whether a repeat prefix is used
15615 * (doesn't matter which for this instruction).
15616 * @param cbInstr The instruction length in bytes.
15617 * @param iEffSeg The effective segment register number (X86_SREG_XXX).
15618 * @param fIoChecked Whether the access to the I/O port has been
15619 * checked or not. It's typically checked in the
15620 * HM scenario.
15621 */
15622VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15623 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
15624{
15625 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
15626 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15627
15628 /*
15629 * State init.
15630 */
15631 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15632
15633 /*
15634 * Switch orgy for getting to the right handler.
15635 */
15636 VBOXSTRICTRC rcStrict;
15637 if (fRepPrefix)
15638 {
15639 switch (enmAddrMode)
15640 {
15641 case IEMMODE_16BIT:
15642 switch (cbValue)
15643 {
15644 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15645 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15646 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15647 default:
15648 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15649 }
15650 break;
15651
15652 case IEMMODE_32BIT:
15653 switch (cbValue)
15654 {
15655 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15656 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15657 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15658 default:
15659 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15660 }
15661 break;
15662
15663 case IEMMODE_64BIT:
15664 switch (cbValue)
15665 {
15666 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15667 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15668 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15669 default:
15670 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15671 }
15672 break;
15673
15674 default:
15675 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15676 }
15677 }
15678 else
15679 {
15680 switch (enmAddrMode)
15681 {
15682 case IEMMODE_16BIT:
15683 switch (cbValue)
15684 {
15685 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15686 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15687 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15688 default:
15689 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15690 }
15691 break;
15692
15693 case IEMMODE_32BIT:
15694 switch (cbValue)
15695 {
15696 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15697 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15698 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15699 default:
15700 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15701 }
15702 break;
15703
15704 case IEMMODE_64BIT:
15705 switch (cbValue)
15706 {
15707 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15708 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15709 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15710 default:
15711 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15712 }
15713 break;
15714
15715 default:
15716 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15717 }
15718 }
15719
15720 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15721}
15722
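/*
 * An illustrative sketch of an HM exit handler calling the API above for a
 * "rep outsb" (DS:ESI, port in DX) with 32-bit addressing. The helper name is
 * hypothetical, the access size / address mode / segment are values a real
 * caller would take from the exit information, and the I/O-port permission
 * check is assumed to have been done by the caller as documented.
 */
static VBOXSTRICTRC demoEmulateRepOutsb(PVMCPU pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu,
                                1 /*cbValue*/,
                                IEMMODE_32BIT,
                                true /*fRepPrefix*/,
                                cbInstr,
                                X86_SREG_DS /*iEffSeg*/,
                                true /*fIoChecked*/);
}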
15723
15724/**
15725 * Interface for HM and EM for executing string I/O IN (read) instructions.
15726 *
15727 * This API ASSUMES that the caller has already verified that the guest code is
15728 * allowed to access the I/O port. (The I/O port is in the DX register in the
15729 * guest state.)
15730 *
15731 * @returns Strict VBox status code.
15732 * @param pVCpu The cross context virtual CPU structure.
15733 * @param cbValue The size of the I/O port access (1, 2, or 4).
15734 * @param enmAddrMode The addressing mode.
15735 * @param fRepPrefix Indicates whether a repeat prefix is used
15736 * (doesn't matter which for this instruction).
15737 * @param cbInstr The instruction length in bytes.
15738 * @param fIoChecked Whether the access to the I/O port has been
15739 * checked or not. It's typically checked in the
15740 * HM scenario.
15741 */
15742VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15743 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15744{
15745 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15746
15747 /*
15748 * State init.
15749 */
15750 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15751
15752 /*
15753 * Switch orgy for getting to the right handler.
15754 */
15755 VBOXSTRICTRC rcStrict;
15756 if (fRepPrefix)
15757 {
15758 switch (enmAddrMode)
15759 {
15760 case IEMMODE_16BIT:
15761 switch (cbValue)
15762 {
15763 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15764 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15765 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15766 default:
15767 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15768 }
15769 break;
15770
15771 case IEMMODE_32BIT:
15772 switch (cbValue)
15773 {
15774 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15775 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15776 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15777 default:
15778 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15779 }
15780 break;
15781
15782 case IEMMODE_64BIT:
15783 switch (cbValue)
15784 {
15785 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15786 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15787 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15788 default:
15789 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15790 }
15791 break;
15792
15793 default:
15794 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15795 }
15796 }
15797 else
15798 {
15799 switch (enmAddrMode)
15800 {
15801 case IEMMODE_16BIT:
15802 switch (cbValue)
15803 {
15804 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15805 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15806 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15807 default:
15808 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15809 }
15810 break;
15811
15812 case IEMMODE_32BIT:
15813 switch (cbValue)
15814 {
15815 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15816 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15817 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15818 default:
15819 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15820 }
15821 break;
15822
15823 case IEMMODE_64BIT:
15824 switch (cbValue)
15825 {
15826 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15827 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15828 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15829 default:
15830 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15831 }
15832 break;
15833
15834 default:
15835 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15836 }
15837 }
15838
15839 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15840}
15841
15842
15843/**
15844 * Interface for raw-mode to execute an OUT (write) instruction.
15845 *
15846 * @returns Strict VBox status code.
15847 * @param pVCpu The cross context virtual CPU structure.
15848 * @param cbInstr The instruction length in bytes.
15849 * @param u16Port The port to write to.
15850 * @param cbReg The register size.
15851 *
15852 * @remarks In ring-0 not all of the state needs to be synced in.
15853 */
15854VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15855{
15856 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15857 Assert(cbReg <= 4 && cbReg != 3);
15858
15859 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15860 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
15861 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15862}
15863
15864
15865/**
15866 * Interface for raw-mode to execute an IN (read) instruction.
15867 *
15868 * @returns Strict VBox status code.
15869 * @param pVCpu The cross context virtual CPU structure.
15870 * @param cbInstr The instruction length in bytes.
15871 * @param u16Port The port to read.
15872 * @param cbReg The register size.
15873 */
15874VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15875{
15876 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15877 Assert(cbReg <= 4 && cbReg != 3);
15878
15879 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15880 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
15881 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15882}
15883
15884
15885/**
15886 * Interface for HM and EM to write to a CRx register.
15887 *
15888 * @returns Strict VBox status code.
15889 * @param pVCpu The cross context virtual CPU structure.
15890 * @param cbInstr The instruction length in bytes.
15891 * @param iCrReg The control register number (destination).
15892 * @param iGReg The general purpose register number (source).
15893 *
15894 * @remarks In ring-0 not all of the state needs to be synced in.
15895 */
15896VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15897{
15898 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15899 Assert(iCrReg < 16);
15900 Assert(iGReg < 16);
15901
15902 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15903 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15904 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15905}
15906
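/*
 * An illustrative sketch of a CR-write intercept forwarded to the interface
 * above, e.g. for a guest "mov cr3, rax". The helper name is hypothetical and
 * the register indices, hard-coded here, would come from the exit decoding in
 * a real caller; X86_GREG_xAX is assumed from the x86 header.
 */
static VBOXSTRICTRC demoEmulateMovToCr3(PVMCPU pVCpu, uint8_t cbInstr)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, 3 /*iCrReg=CR3*/, X86_GREG_xAX /*iGReg*/);
}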
15907
15908/**
15909 * Interface for HM and EM to read from a CRx register.
15910 *
15911 * @returns Strict VBox status code.
15912 * @param pVCpu The cross context virtual CPU structure.
15913 * @param cbInstr The instruction length in bytes.
15914 * @param iGReg The general purpose register number (destination).
15915 * @param iCrReg The control register number (source).
15916 *
15917 * @remarks In ring-0 not all of the state needs to be synced in.
15918 */
15919VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15920{
15921 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15922 Assert(iCrReg < 16);
15923 Assert(iGReg < 16);
15924
15925 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15926 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15927 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15928}
15929
15930
15931/**
15932 * Interface for HM and EM to clear the CR0[TS] bit.
15933 *
15934 * @returns Strict VBox status code.
15935 * @param pVCpu The cross context virtual CPU structure.
15936 * @param cbInstr The instruction length in bytes.
15937 *
15938 * @remarks In ring-0 not all of the state needs to be synced in.
15939 */
15940VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15941{
15942 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15943
15944 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15945 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15946 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15947}
15948
15949
15950/**
15951 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15952 *
15953 * @returns Strict VBox status code.
15954 * @param pVCpu The cross context virtual CPU structure.
15955 * @param cbInstr The instruction length in bytes.
15956 * @param uValue The value to load into CR0.
15957 *
15958 * @remarks In ring-0 not all of the state needs to be synced in.
15959 */
15960VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
15961{
15962 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15963
15964 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15965 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
15966 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15967}
15968
15969
15970/**
15971 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15972 *
15973 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15974 *
15975 * @returns Strict VBox status code.
15976 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15977 * @param cbInstr The instruction length in bytes.
15978 * @remarks In ring-0 not all of the state needs to be synced in.
15979 * @thread EMT(pVCpu)
15980 */
15981VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15982{
15983 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15984
15985 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15986 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15987 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15988}
15989
15990
15991/**
15992 * Interface for HM and EM to emulate the INVLPG instruction.
15993 *
15994 * @param pVCpu The cross context virtual CPU structure.
15995 * @param cbInstr The instruction length in bytes.
15996 * @param GCPtrPage The effective address of the page to invalidate.
15997 *
15998 * @remarks In ring-0 not all of the state needs to be synced in.
15999 */
16000VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
16001{
16002 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16003
16004 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16005 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
16006 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16007}
16008
16009
16010/**
16011 * Checks if IEM is in the process of delivering an event (interrupt or
16012 * exception).
16013 *
16014 * @returns true if we're in the process of raising an interrupt or exception,
16015 * false otherwise.
16016 * @param pVCpu The cross context virtual CPU structure.
16017 * @param puVector Where to store the vector associated with the
16018 * currently delivered event, optional.
16019 * @param pfFlags Where to store the event delivery flags (see
16020 * IEM_XCPT_FLAGS_XXX), optional.
16021 * @param puErr Where to store the error code associated with the
16022 * event, optional.
16023 * @param puCr2 Where to store the CR2 associated with the event,
16024 * optional.
16025 * @remarks The caller should check the flags to determine if the error code and
16026 * CR2 are valid for the event.
16027 */
16028VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
16029{
16030 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
16031 if (fRaisingXcpt)
16032 {
16033 if (puVector)
16034 *puVector = pVCpu->iem.s.uCurXcpt;
16035 if (pfFlags)
16036 *pfFlags = pVCpu->iem.s.fCurXcpt;
16037 if (puErr)
16038 *puErr = pVCpu->iem.s.uCurXcptErr;
16039 if (puCr2)
16040 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
16041 }
16042 return fRaisingXcpt;
16043}
16044
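/*
 * An illustrative example of querying the event currently being delivered,
 * e.g. to decide how a nested fault should be handled. The helper name and the
 * surrounding decision logic are assumptions; per the remarks above, the error
 * code and CR2 are only meaningful if the returned flags say so.
 */
static bool demoIsDeliveringEvent(PVMCPU pVCpu, uint8_t *puVector)
{
    uint32_t fFlags = 0;
    uint32_t uErr   = 0;
    uint64_t uCr2   = 0;
    /* Returns false (and leaves the outputs untouched) when no event is in flight. */
    return IEMGetCurrentXcpt(pVCpu, puVector, &fFlags, &uErr, &uCr2);
}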
16045#ifdef VBOX_WITH_NESTED_HWVIRT
16046/**
16047 * Interface for HM and EM to emulate the CLGI instruction.
16048 *
16049 * @returns Strict VBox status code.
16050 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16051 * @param cbInstr The instruction length in bytes.
16052 * @thread EMT(pVCpu)
16053 */
16054VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
16055{
16056 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16057
16058 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16059 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
16060 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16061}
16062
16063
16064/**
16065 * Interface for HM and EM to emulate the STGI instruction.
16066 *
16067 * @returns Strict VBox status code.
16068 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16069 * @param cbInstr The instruction length in bytes.
16070 * @thread EMT(pVCpu)
16071 */
16072VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
16073{
16074 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16075
16076 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16077 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
16078 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16079}
16080
16081
16082/**
16083 * Interface for HM and EM to emulate the VMLOAD instruction.
16084 *
16085 * @returns Strict VBox status code.
16086 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16087 * @param cbInstr The instruction length in bytes.
16088 * @thread EMT(pVCpu)
16089 */
16090VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
16091{
16092 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16093
16094 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16095 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
16096 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16097}
16098
16099
16100/**
16101 * Interface for HM and EM to emulate the VMSAVE instruction.
16102 *
16103 * @returns Strict VBox status code.
16104 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16105 * @param cbInstr The instruction length in bytes.
16106 * @thread EMT(pVCpu)
16107 */
16108VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
16109{
16110 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16111
16112 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16113 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
16114 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16115}
16116
16117
16118/**
16119 * Interface for HM and EM to emulate the INVLPGA instruction.
16120 *
16121 * @returns Strict VBox status code.
16122 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16123 * @param cbInstr The instruction length in bytes.
16124 * @thread EMT(pVCpu)
16125 */
16126VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
16127{
16128 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16129
16130 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16131 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
16132 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16133}
16134
16135
16136/**
16137 * Interface for HM and EM to emulate the VMRUN instruction.
16138 *
16139 * @returns Strict VBox status code.
16140 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16141 * @param cbInstr The instruction length in bytes.
16142 * @thread EMT(pVCpu)
16143 */
16144VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
16145{
16146 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16147
16148 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16149 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
16150 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16151}
16152
16153
16154/**
16155 * Interface for HM and EM to emulate \#VMEXIT.
16156 *
16157 * @returns Strict VBox status code.
16158 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16159 * @param uExitCode The exit code.
16160 * @param uExitInfo1 The exit info. 1 field.
16161 * @param uExitInfo2 The exit info. 2 field.
16162 * @thread EMT(pVCpu)
16163 */
16164VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
16165{
16166 return iemSvmVmexit(pVCpu, IEM_GET_CTX(pVCpu), uExitCode, uExitInfo1, uExitInfo2);
16167}
16168#endif /* VBOX_WITH_NESTED_HWVIRT */
16169
16170#ifdef IN_RING3
16171
16172/**
16173 * Handles the unlikely and probably fatal merge cases.
16174 *
16175 * @returns Merged status code.
16176 * @param rcStrict Current EM status code.
16177 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16178 * with @a rcStrict.
16179 * @param iMemMap The memory mapping index. For error reporting only.
16180 * @param pVCpu The cross context virtual CPU structure of the calling
16181 * thread, for error reporting only.
16182 */
16183DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
16184 unsigned iMemMap, PVMCPU pVCpu)
16185{
16186 if (RT_FAILURE_NP(rcStrict))
16187 return rcStrict;
16188
16189 if (RT_FAILURE_NP(rcStrictCommit))
16190 return rcStrictCommit;
16191
16192 if (rcStrict == rcStrictCommit)
16193 return rcStrictCommit;
16194
16195 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
16196 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
16197 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
16198 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
16199 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
16200 return VERR_IOM_FF_STATUS_IPE;
16201}
16202
16203
16204/**
16205 * Helper for IOMR3ProcessForceFlag.
16206 *
16207 * @returns Merged status code.
16208 * @param rcStrict Current EM status code.
16209 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16210 * with @a rcStrict.
16211 * @param iMemMap The memory mapping index. For error reporting only.
16212 * @param pVCpu The cross context virtual CPU structure of the calling
16213 * thread, for error reporting only.
16214 */
16215DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
16216{
16217 /* Simple. */
16218 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
16219 return rcStrictCommit;
16220
16221 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
16222 return rcStrict;
16223
16224 /* EM scheduling status codes. */
16225 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
16226 && rcStrict <= VINF_EM_LAST))
16227 {
16228 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
16229 && rcStrictCommit <= VINF_EM_LAST))
16230 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
16231 }
16232
16233 /* Unlikely */
16234 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
16235}
16236
16237
16238/**
16239 * Called by force-flag handling code when VMCPU_FF_IEM is set.
16240 *
16241 * @returns Merge between @a rcStrict and what the commit operation returned.
16242 * @param pVM The cross context VM structure.
16243 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16244 * @param rcStrict The status code returned by ring-0 or raw-mode.
16245 */
16246VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
16247{
16248 /*
16249 * Reset the pending commit.
16250 */
16251 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
16252 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
16253 ("%#x %#x %#x\n",
16254 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16255 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
16256
16257 /*
16258 * Commit the pending bounce buffers (usually just one).
16259 */
16260 unsigned cBufs = 0;
16261 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
16262 while (iMemMap-- > 0)
16263 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
16264 {
16265 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
16266 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
16267 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
16268
16269 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
16270 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
16271 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
16272
16273 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
16274 {
16275 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
16276 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16277 pbBuf,
16278 cbFirst,
16279 PGMACCESSORIGIN_IEM);
16280 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16281 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16282 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16283 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16284 }
16285
16286 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16287 {
16288 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16289 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16290 pbBuf + cbFirst,
16291 cbSecond,
16292 PGMACCESSORIGIN_IEM);
16293 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16294 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16295 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16296 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16297 }
16298 cBufs++;
16299 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16300 }
16301
16302 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16303 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16304 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16305 pVCpu->iem.s.cActiveMappings = 0;
16306 return rcStrict;
16307}
16308
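/*
 * An illustrative sketch of the ring-3 force-flag handling that the function
 * above serves: when VMCPU_FF_IEM is set, the pending bounce-buffer commits are
 * folded into the caller's current status code. The helper name and the
 * surrounding call site are assumptions.
 */
static VBOXSTRICTRC demoHandleIemForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))        /* Pending IEM write commit(s)? */
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}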
16309#endif /* IN_RING3 */
16310