VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 72128

Last change on this file since 72128 was 71857, checked in by vboxsync, 7 years ago

VMM/IEM: Nested hw.virt: Clean up.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 639.2 KB
1/* $Id: IEMAll.cpp 71857 2018-04-13 06:32:15Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed, as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
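/*
 * Illustrative sketch only: the levels above map onto the generic logging
 * macros from <VBox/log.h>, with LOG_GROUP set to LOG_GROUP_IEM further down
 * in this file.  A hypothetical call site for a few of them:
 *
 *     LogFlow(("IEMExecOne: enter\n"));                          // enter/exit state info
 *     Log4(("decode - %04x:%08RX64 nop\n", uCs, uRip));          // mnemonic w/ EIP
 *     Log8(("memory write at %RGv, %u bytes\n", GCPtrDst, cb));  // memory writes
 *
 * The variables uCs, uRip, GCPtrDst and cb are made up for the example.
 */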
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#ifdef VBOX_WITH_NESTED_HWVIRT
105# include <VBox/vmm/em.h>
106# include <VBox/vmm/hm_svm.h>
107#endif
108#include <VBox/vmm/tm.h>
109#include <VBox/vmm/dbgf.h>
110#include <VBox/vmm/dbgftrace.h>
111#ifdef VBOX_WITH_RAW_MODE_NOT_R0
112# include <VBox/vmm/patm.h>
113# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
114# include <VBox/vmm/csam.h>
115# endif
116#endif
117#include "IEMInternal.h"
118#ifdef IEM_VERIFICATION_MODE_FULL
119# include <VBox/vmm/rem.h>
120# include <VBox/vmm/mm.h>
121#endif
122#include <VBox/vmm/vm.h>
123#include <VBox/log.h>
124#include <VBox/err.h>
125#include <VBox/param.h>
126#include <VBox/dis.h>
127#include <VBox/disopcode.h>
128#include <iprt/assert.h>
129#include <iprt/string.h>
130#include <iprt/x86.h>
131
132
133/*********************************************************************************************************************************
134* Structures and Typedefs *
135*********************************************************************************************************************************/
136/** @typedef PFNIEMOP
137 * Pointer to an opcode decoder function.
138 */
139
140/** @def FNIEMOP_DEF
141 * Define an opcode decoder function.
142 *
143 * We're using macros for this so that adding and removing parameters as well as
144 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
145 *
146 * @param a_Name The function name.
147 */
148
149/** @typedef PFNIEMOPRM
150 * Pointer to an opcode decoder function with RM byte.
151 */
152
153/** @def FNIEMOPRM_DEF
154 * Define an opcode decoder function with RM byte.
155 *
156 * We're using macros for this so that adding and removing parameters as well as
157 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL_1
158 *
159 * @param a_Name The function name.
160 */
161
162#if defined(__GNUC__) && defined(RT_ARCH_X86)
163typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
164typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
165# define FNIEMOP_DEF(a_Name) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
167# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
168 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
169# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
170 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
171
172#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
173typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
174typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
175# define FNIEMOP_DEF(a_Name) \
176 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
177# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
178 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
179# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
180 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
181
182#elif defined(__GNUC__)
183typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
184typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
185# define FNIEMOP_DEF(a_Name) \
186 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
187# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
188 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
189# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
190 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
191
192#else
193typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
194typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
195# define FNIEMOP_DEF(a_Name) \
196 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
197# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
198 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
199# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
200 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
201
202#endif
203#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
204
205
206/**
207 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
208 */
209typedef union IEMSELDESC
210{
211 /** The legacy view. */
212 X86DESC Legacy;
213 /** The long mode view. */
214 X86DESC64 Long;
215} IEMSELDESC;
216/** Pointer to a selector descriptor table entry. */
217typedef IEMSELDESC *PIEMSELDESC;
218
219/**
220 * CPU exception classes.
221 */
222typedef enum IEMXCPTCLASS
223{
224 IEMXCPTCLASS_BENIGN,
225 IEMXCPTCLASS_CONTRIBUTORY,
226 IEMXCPTCLASS_PAGE_FAULT,
227 IEMXCPTCLASS_DOUBLE_FAULT
228} IEMXCPTCLASS;
229
230
231/*********************************************************************************************************************************
232* Defined Constants And Macros *
233*********************************************************************************************************************************/
234/** @def IEM_WITH_SETJMP
235 * Enables alternative status code handling using setjmps.
236 *
237 * This adds a bit of expense via the setjmp() call since it saves all the
238 * non-volatile registers. However, it eliminates return code checks and allows
239 * for more optimal return value passing (return regs instead of stack buffer).
240 */
241#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
242# define IEM_WITH_SETJMP
243#endif
244
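/*
 * Rough sketch of the two status handling styles this switch selects between
 * (illustrative only, using one of the fetch helpers declared further down):
 *
 *     // Without IEM_WITH_SETJMP every helper returns a VBOXSTRICTRC that the
 *     // caller must check and propagate:
 *     uint32_t     u32Value;
 *     VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &u32Value, iSegReg, GCPtrMem);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *
 *     // With IEM_WITH_SETJMP the helpers longjmp on failure instead, so the
 *     // value can be returned directly and the error path is handled once, at
 *     // the setjmp site wrapping instruction execution (the *Jmp helper name
 *     // is an assumption made for this sketch):
 *     uint32_t u32Value = iemMemFetchDataU32Jmp(pVCpu, iSegReg, GCPtrMem);
 */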
245/** Temporary hack to disable the double execution. Will be removed in favor
246 * of a dedicated execution mode in EM. */
247//#define IEM_VERIFICATION_MODE_NO_REM
248
249/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
250 * due to GCC lacking knowledge about the value range of a switch. */
251#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
252
253/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
254#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
255
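/*
 * Usage sketch (hypothetical, not lifted from this file): the macros expand to
 * a 'default:' label, so they are dropped straight into switches that are
 * known to cover every value:
 *
 *     switch (pVCpu->iem.s.enmEffOpSize)
 *     {
 *         case IEMMODE_16BIT: cbValue = 2; break;
 *         case IEMMODE_32BIT: cbValue = 4; break;
 *         case IEMMODE_64BIT: cbValue = 8; break;
 *         IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *     }
 */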
256/**
257 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
258 * occasion.
259 */
260#ifdef LOG_ENABLED
261# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
262 do { \
263 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
264 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
265 } while (0)
266#else
267# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
268 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
269#endif
270
271/**
272 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
273 * occasion using the supplied logger statement.
274 *
275 * @param a_LoggerArgs What to log on failure.
276 */
277#ifdef LOG_ENABLED
278# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
279 do { \
280 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
281 /*LogFunc(a_LoggerArgs);*/ \
282 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
283 } while (0)
284#else
285# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
286 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
287#endif
288
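/*
 * Usage sketch (hypothetical): unimplemented corner cases bail out with a
 * short note on what was hit, e.g.
 *
 *     IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("task switch with nested paging\n"));
 */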
289/**
290 * Call an opcode decoder function.
291 *
292 * We're using macros for this so that adding and removing parameters can be
293 * done as we please. See FNIEMOP_DEF.
294 */
295#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
296
297/**
298 * Call a common opcode decoder function taking one extra argument.
299 *
300 * We're using macros for this so that adding and removing parameters can be
301 * done as we please. See FNIEMOP_DEF_1.
302 */
303#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
304
305/**
306 * Call a common opcode decoder function taking two extra arguments.
307 *
308 * We're using macros for this so that adding and removing parameters can be
309 * done as we please. See FNIEMOP_DEF_2.
310 */
311#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
312
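/*
 * Putting the pieces together, a decoder function and its dispatch look
 * roughly like this (hypothetical handler; IEM_OPCODE_GET_NEXT_U8 is assumed
 * for the sketch):
 *
 *     FNIEMOP_DEF(iemOp_example)
 *     {
 *         uint8_t bRm;
 *         IEM_OPCODE_GET_NEXT_U8(&bRm);
 *         ...
 *     }
 *
 *     // from the decoder loop, via the one-byte opcode map:
 *     return FNIEMOP_CALL(g_apfnOneByteMap[b]);
 */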
313/**
314 * Check if we're currently executing in real or virtual 8086 mode.
315 *
316 * @returns @c true if it is, @c false if not.
317 * @param a_pVCpu The IEM state of the current CPU.
318 */
319#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
320
321/**
322 * Check if we're currently executing in virtual 8086 mode.
323 *
324 * @returns @c true if it is, @c false if not.
325 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
326 */
327#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
328
329/**
330 * Check if we're currently executing in long mode.
331 *
332 * @returns @c true if it is, @c false if not.
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
336
337/**
338 * Check if we're currently executing in real mode.
339 *
340 * @returns @c true if it is, @c false if not.
341 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
342 */
343#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
344
345/**
346 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
347 * @returns PCCPUMFEATURES
348 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
349 */
350#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
351
352/**
353 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
354 * @returns PCCPUMFEATURES
355 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
356 */
357#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
358
359/**
360 * Evaluates to true if we're presenting an Intel CPU to the guest.
361 */
362#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
363
364/**
365 * Evaluates to true if we're presenting an AMD CPU to the guest.
366 */
367#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
368
369/**
370 * Check if the address is canonical.
371 */
372#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
373
374/**
375 * Gets the effective VEX.VVVV value.
376 *
377 * The 4th bit is ignored if not 64-bit code.
378 * @returns effective V-register value.
379 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
380 */
381#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
382 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
383
384/** @def IEM_USE_UNALIGNED_DATA_ACCESS
385 * Use unaligned accesses instead of elaborate byte assembly. */
386#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
387# define IEM_USE_UNALIGNED_DATA_ACCESS
388#endif
389
390#ifdef VBOX_WITH_NESTED_HWVIRT
391/**
392 * Check the common SVM instruction preconditions.
393 */
394# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
395 do { \
396 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
397 { \
398 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
399 return iemRaiseUndefinedOpcode(pVCpu); \
400 } \
401 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
402 { \
403 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
404 return iemRaiseUndefinedOpcode(pVCpu); \
405 } \
406 if (pVCpu->iem.s.uCpl != 0) \
407 { \
408 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
409 return iemRaiseGeneralProtectionFault0(pVCpu); \
410 } \
411 } while (0)
412
413/**
414 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
415 */
416# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
417 do { \
418 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
419 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
420 } while (0)
421
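/*
 * Typical pairing (sketch only): an SVM instruction implementation runs the
 * common checks and then records the next RIP before doing the real work:
 *
 *     IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmrun);
 *     IEM_SVM_UPDATE_NRIP(pVCpu);
 *     ...
 *
 * The instruction name argument ('vmrun' here) is only used for the log
 * strings via RT_STR().
 */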
422/**
423 * Check if SVM is enabled.
424 */
425# define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
426
427/**
428 * Check if an SVM control/instruction intercept is set.
429 */
430# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
431
432/**
433 * Check if an SVM read CRx intercept is set.
434 */
435# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
436
437/**
438 * Check if an SVM write CRx intercept is set.
439 */
440# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
441
442/**
443 * Check if an SVM read DRx intercept is set.
444 */
445# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
446
447/**
448 * Check if an SVM write DRx intercept is set.
449 */
450# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
451
452/**
453 * Check if an SVM exception intercept is set.
454 */
455# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
456
457/**
458 * Get the SVM pause-filter count.
459 */
460# define IEM_GET_SVM_PAUSE_FILTER_COUNT(a_pVCpu) (CPUMGetGuestSvmPauseFilterCount(a_pVCpu, IEM_GET_CTX(a_pVCpu)))
461
462/**
463 * Invokes the SVM \#VMEXIT handler for the nested-guest.
464 */
465# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
466 do \
467 { \
468 return iemSvmVmexit((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); \
469 } while (0)
470
471/**
472 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
473 * corresponding decode assist information.
474 */
475# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
476 do \
477 { \
478 uint64_t uExitInfo1; \
479 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
480 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
481 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
482 else \
483 uExitInfo1 = 0; \
484 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
485 } while (0)
486
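/*
 * Intercept-check sketch (illustrative; SVM_CTRL_INTERCEPT_RDTSC and
 * SVM_EXIT_RDTSC are assumed to come from hm_svm.h): instruction code pairs
 * the IEM_IS_SVM_*_INTERCEPT_SET tests with the #VMEXIT macro above, e.g.
 *
 *     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSC))
 *         IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_RDTSC, 0, 0);
 */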
487#else
488# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) do { } while (0)
489# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
490# define IEM_IS_SVM_ENABLED(a_pVCpu) (false)
491# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
492# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
493# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
494# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
495# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
496# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
497# define IEM_GET_SVM_PAUSE_FILTER_COUNT(a_pVCpu) (0)
498# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
499# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
500
501#endif /* VBOX_WITH_NESTED_HWVIRT */
502
503
504/*********************************************************************************************************************************
505* Global Variables *
506*********************************************************************************************************************************/
507extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
508
509
510/** Function table for the ADD instruction. */
511IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
512{
513 iemAImpl_add_u8, iemAImpl_add_u8_locked,
514 iemAImpl_add_u16, iemAImpl_add_u16_locked,
515 iemAImpl_add_u32, iemAImpl_add_u32_locked,
516 iemAImpl_add_u64, iemAImpl_add_u64_locked
517};
518
519/** Function table for the ADC instruction. */
520IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
521{
522 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
523 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
524 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
525 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
526};
527
528/** Function table for the SUB instruction. */
529IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
530{
531 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
532 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
533 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
534 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
535};
536
537/** Function table for the SBB instruction. */
538IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
539{
540 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
541 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
542 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
543 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
544};
545
546/** Function table for the OR instruction. */
547IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
548{
549 iemAImpl_or_u8, iemAImpl_or_u8_locked,
550 iemAImpl_or_u16, iemAImpl_or_u16_locked,
551 iemAImpl_or_u32, iemAImpl_or_u32_locked,
552 iemAImpl_or_u64, iemAImpl_or_u64_locked
553};
554
555/** Function table for the XOR instruction. */
556IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
557{
558 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
559 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
560 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
561 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
562};
563
564/** Function table for the AND instruction. */
565IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
566{
567 iemAImpl_and_u8, iemAImpl_and_u8_locked,
568 iemAImpl_and_u16, iemAImpl_and_u16_locked,
569 iemAImpl_and_u32, iemAImpl_and_u32_locked,
570 iemAImpl_and_u64, iemAImpl_and_u64_locked
571};
572
573/** Function table for the CMP instruction.
574 * @remarks Making operand order ASSUMPTIONS.
575 */
576IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
577{
578 iemAImpl_cmp_u8, NULL,
579 iemAImpl_cmp_u16, NULL,
580 iemAImpl_cmp_u32, NULL,
581 iemAImpl_cmp_u64, NULL
582};
583
584/** Function table for the TEST instruction.
585 * @remarks Making operand order ASSUMPTIONS.
586 */
587IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
588{
589 iemAImpl_test_u8, NULL,
590 iemAImpl_test_u16, NULL,
591 iemAImpl_test_u32, NULL,
592 iemAImpl_test_u64, NULL
593};
594
595/** Function table for the BT instruction. */
596IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
597{
598 NULL, NULL,
599 iemAImpl_bt_u16, NULL,
600 iemAImpl_bt_u32, NULL,
601 iemAImpl_bt_u64, NULL
602};
603
604/** Function table for the BTC instruction. */
605IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
606{
607 NULL, NULL,
608 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
609 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
610 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
611};
612
613/** Function table for the BTR instruction. */
614IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
615{
616 NULL, NULL,
617 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
618 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
619 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
620};
621
622/** Function table for the BTS instruction. */
623IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
624{
625 NULL, NULL,
626 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
627 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
628 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
629};
630
631/** Function table for the BSF instruction. */
632IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
633{
634 NULL, NULL,
635 iemAImpl_bsf_u16, NULL,
636 iemAImpl_bsf_u32, NULL,
637 iemAImpl_bsf_u64, NULL
638};
639
640/** Function table for the BSR instruction. */
641IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
642{
643 NULL, NULL,
644 iemAImpl_bsr_u16, NULL,
645 iemAImpl_bsr_u32, NULL,
646 iemAImpl_bsr_u64, NULL
647};
648
649/** Function table for the IMUL instruction. */
650IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
651{
652 NULL, NULL,
653 iemAImpl_imul_two_u16, NULL,
654 iemAImpl_imul_two_u32, NULL,
655 iemAImpl_imul_two_u64, NULL
656};
657
658/** Group 1 /r lookup table. */
659IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
660{
661 &g_iemAImpl_add,
662 &g_iemAImpl_or,
663 &g_iemAImpl_adc,
664 &g_iemAImpl_sbb,
665 &g_iemAImpl_and,
666 &g_iemAImpl_sub,
667 &g_iemAImpl_xor,
668 &g_iemAImpl_cmp
669};
670
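/*
 * Indexing sketch (hypothetical call site): for the 0x80-0x83 group-1
 * encodings the /r (reg) field of the ModR/M byte selects the operation,
 * using the X86_MODRM_* helpers from iprt/x86.h:
 *
 *     PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
 */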
671/** Function table for the INC instruction. */
672IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
673{
674 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
675 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
676 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
677 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
678};
679
680/** Function table for the DEC instruction. */
681IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
682{
683 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
684 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
685 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
686 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
687};
688
689/** Function table for the NEG instruction. */
690IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
691{
692 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
693 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
694 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
695 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
696};
697
698/** Function table for the NOT instruction. */
699IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
700{
701 iemAImpl_not_u8, iemAImpl_not_u8_locked,
702 iemAImpl_not_u16, iemAImpl_not_u16_locked,
703 iemAImpl_not_u32, iemAImpl_not_u32_locked,
704 iemAImpl_not_u64, iemAImpl_not_u64_locked
705};
706
707
708/** Function table for the ROL instruction. */
709IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
710{
711 iemAImpl_rol_u8,
712 iemAImpl_rol_u16,
713 iemAImpl_rol_u32,
714 iemAImpl_rol_u64
715};
716
717/** Function table for the ROR instruction. */
718IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
719{
720 iemAImpl_ror_u8,
721 iemAImpl_ror_u16,
722 iemAImpl_ror_u32,
723 iemAImpl_ror_u64
724};
725
726/** Function table for the RCL instruction. */
727IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
728{
729 iemAImpl_rcl_u8,
730 iemAImpl_rcl_u16,
731 iemAImpl_rcl_u32,
732 iemAImpl_rcl_u64
733};
734
735/** Function table for the RCR instruction. */
736IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
737{
738 iemAImpl_rcr_u8,
739 iemAImpl_rcr_u16,
740 iemAImpl_rcr_u32,
741 iemAImpl_rcr_u64
742};
743
744/** Function table for the SHL instruction. */
745IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
746{
747 iemAImpl_shl_u8,
748 iemAImpl_shl_u16,
749 iemAImpl_shl_u32,
750 iemAImpl_shl_u64
751};
752
753/** Function table for the SHR instruction. */
754IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
755{
756 iemAImpl_shr_u8,
757 iemAImpl_shr_u16,
758 iemAImpl_shr_u32,
759 iemAImpl_shr_u64
760};
761
762/** Function table for the SAR instruction. */
763IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
764{
765 iemAImpl_sar_u8,
766 iemAImpl_sar_u16,
767 iemAImpl_sar_u32,
768 iemAImpl_sar_u64
769};
770
771
772/** Function table for the MUL instruction. */
773IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
774{
775 iemAImpl_mul_u8,
776 iemAImpl_mul_u16,
777 iemAImpl_mul_u32,
778 iemAImpl_mul_u64
779};
780
781/** Function table for the IMUL instruction working implicitly on rAX. */
782IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
783{
784 iemAImpl_imul_u8,
785 iemAImpl_imul_u16,
786 iemAImpl_imul_u32,
787 iemAImpl_imul_u64
788};
789
790/** Function table for the DIV instruction. */
791IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
792{
793 iemAImpl_div_u8,
794 iemAImpl_div_u16,
795 iemAImpl_div_u32,
796 iemAImpl_div_u64
797};
798
799/** Function table for the IDIV instruction. */
800IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
801{
802 iemAImpl_idiv_u8,
803 iemAImpl_idiv_u16,
804 iemAImpl_idiv_u32,
805 iemAImpl_idiv_u64
806};
807
808/** Function table for the SHLD instruction */
809IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
810{
811 iemAImpl_shld_u16,
812 iemAImpl_shld_u32,
813 iemAImpl_shld_u64,
814};
815
816/** Function table for the SHRD instruction */
817IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
818{
819 iemAImpl_shrd_u16,
820 iemAImpl_shrd_u32,
821 iemAImpl_shrd_u64,
822};
823
824
825/** Function table for the PUNPCKLBW instruction */
826IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
828/** Function table for the PUNPCKLWD instruction */
828IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
829/** Function table for the PUNPCKLDQ instruction */
830IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
831/** Function table for the PUNPCKLQDQ instruction */
832IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
833
834/** Function table for the PUNPCKHBW instruction */
835IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
837/** Function table for the PUNPCKHWD instruction */
837IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
838/** Function table for the PUNPCKHDQ instruction */
839IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
840/** Function table for the PUNPCKHQDQ instruction */
841IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
842
843/** Function table for the PXOR instruction */
844IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
845/** Function table for the PCMPEQB instruction */
846IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
847/** Function table for the PCMPEQW instruction */
848IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
849/** Function table for the PCMPEQD instruction */
850IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
851
852
853#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
854/** What IEM just wrote. */
855uint8_t g_abIemWrote[256];
856/** How much IEM just wrote. */
857size_t g_cbIemWrote;
858#endif
859
860
861/*********************************************************************************************************************************
862* Internal Functions *
863*********************************************************************************************************************************/
864IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
865IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
866IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
867IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
868/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
869IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
870IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
871IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
872IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
873IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
874IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
875IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
876IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
877IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
878IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
879IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
880IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
881#ifdef IEM_WITH_SETJMP
882DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
883DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
884DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
885DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
886DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
887#endif
888
889IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
890IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
891IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
892IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
893IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
894IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
895IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
896IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
897IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
898IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
899IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
900IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
901IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
902IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
903IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
904IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
905IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
906
907#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
908IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
909#endif
910IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
911IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
912
913#ifdef VBOX_WITH_NESTED_HWVIRT
914IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uExitCode, uint64_t uExitInfo1,
915 uint64_t uExitInfo2);
916IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t u8Vector, uint32_t fFlags,
917 uint32_t uErr, uint64_t uCr2);
918#endif
919
920/**
921 * Sets the pass up status.
922 *
923 * @returns VINF_SUCCESS.
924 * @param pVCpu The cross context virtual CPU structure of the
925 * calling thread.
926 * @param rcPassUp The pass up status. Must be informational.
927 * VINF_SUCCESS is not allowed.
928 */
929IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
930{
931 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
932
933 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
934 if (rcOldPassUp == VINF_SUCCESS)
935 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
936 /* If both are EM scheduling codes, use EM priority rules. */
937 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
938 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
939 {
940 if (rcPassUp < rcOldPassUp)
941 {
942 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
943 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
944 }
945 else
946 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
947 }
948 /* Override EM scheduling with specific status code. */
949 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
950 {
951 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
952 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
953 }
954 /* Don't override specific status code, first come first served. */
955 else
956 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
957 return VINF_SUCCESS;
958}
959
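/*
 * Usage note: callers use this to flatten "informational but already handled"
 * statuses to VINF_SUCCESS while remembering the strictest one for IEM's
 * caller, mirroring the pattern in the opcode prefetch code below:
 *
 *     if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *         rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
 */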
960
961/**
962 * Calculates the CPU mode.
963 *
964 * This is mainly for updating IEMCPU::enmCpuMode.
965 *
966 * @returns CPU mode.
967 * @param pCtx The register context for the CPU.
968 */
969DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
970{
971 if (CPUMIsGuestIn64BitCodeEx(pCtx))
972 return IEMMODE_64BIT;
973 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
974 return IEMMODE_32BIT;
975 return IEMMODE_16BIT;
976}
977
978
979/**
980 * Initializes the execution state.
981 *
982 * @param pVCpu The cross context virtual CPU structure of the
983 * calling thread.
984 * @param fBypassHandlers Whether to bypass access handlers.
985 *
986 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
987 * side-effects in strict builds.
988 */
989DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
990{
991 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
992
993 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
994
995#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
996 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
997 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
998 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
999 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1000 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1001 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1002 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1003 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1004#endif
1005
1006#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1007 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1008#endif
1009 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1010 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
1011#ifdef VBOX_STRICT
1012 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1013 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1014 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1015 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1016 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1017 pVCpu->iem.s.uRexReg = 127;
1018 pVCpu->iem.s.uRexB = 127;
1019 pVCpu->iem.s.uRexIndex = 127;
1020 pVCpu->iem.s.iEffSeg = 127;
1021 pVCpu->iem.s.idxPrefix = 127;
1022 pVCpu->iem.s.uVex3rdReg = 127;
1023 pVCpu->iem.s.uVexLength = 127;
1024 pVCpu->iem.s.fEvexStuff = 127;
1025 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1026# ifdef IEM_WITH_CODE_TLB
1027 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1028 pVCpu->iem.s.pbInstrBuf = NULL;
1029 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1030 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1031 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1032 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1033# else
1034 pVCpu->iem.s.offOpcode = 127;
1035 pVCpu->iem.s.cbOpcode = 127;
1036# endif
1037#endif
1038
1039 pVCpu->iem.s.cActiveMappings = 0;
1040 pVCpu->iem.s.iNextMapping = 0;
1041 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1042 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1043#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1044 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1045 && pCtx->cs.u64Base == 0
1046 && pCtx->cs.u32Limit == UINT32_MAX
1047 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1048 if (!pVCpu->iem.s.fInPatchCode)
1049 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1050#endif
1051
1052#ifdef IEM_VERIFICATION_MODE_FULL
1053 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
1054 pVCpu->iem.s.fNoRem = true;
1055#endif
1056}
1057
1058#ifdef VBOX_WITH_NESTED_HWVIRT
1059/**
1060 * Performs a minimal reinitialization of the execution state.
1061 *
1062 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1063 * 'world-switch' type operations on the CPU. Currently only nested
1064 * hardware-virtualization uses it.
1065 *
1066 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1067 */
1068IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1069{
1070 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1071 IEMMODE const enmMode = iemCalcCpuMode(pCtx);
1072 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1073
1074 pVCpu->iem.s.uCpl = uCpl;
1075 pVCpu->iem.s.enmCpuMode = enmMode;
1076 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1077 pVCpu->iem.s.enmEffAddrMode = enmMode;
1078 if (enmMode != IEMMODE_64BIT)
1079 {
1080 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1081 pVCpu->iem.s.enmEffOpSize = enmMode;
1082 }
1083 else
1084 {
1085 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1086 pVCpu->iem.s.enmEffOpSize = enmMode;
1087 }
1088 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1089#ifndef IEM_WITH_CODE_TLB
1090 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1091 pVCpu->iem.s.offOpcode = 0;
1092 pVCpu->iem.s.cbOpcode = 0;
1093#endif
1094}
1095#endif
1096
1097/**
1098 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1099 *
1100 * @param pVCpu The cross context virtual CPU structure of the
1101 * calling thread.
1102 */
1103DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1104{
1105 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1106#ifdef IEM_VERIFICATION_MODE_FULL
1107 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
1108#endif
1109#ifdef VBOX_STRICT
1110# ifdef IEM_WITH_CODE_TLB
1111 NOREF(pVCpu);
1112# else
1113 pVCpu->iem.s.cbOpcode = 0;
1114# endif
1115#else
1116 NOREF(pVCpu);
1117#endif
1118}
1119
1120
1121/**
1122 * Initializes the decoder state.
1123 *
1124 * iemReInitDecoder is mostly a copy of this function.
1125 *
1126 * @param pVCpu The cross context virtual CPU structure of the
1127 * calling thread.
1128 * @param fBypassHandlers Whether to bypass access handlers.
1129 */
1130DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1131{
1132 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1133
1134 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1135
1136#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1137 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1138 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1139 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1140 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1141 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1142 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1143 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1144 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1145#endif
1146
1147#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1148 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1149#endif
1150 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1151#ifdef IEM_VERIFICATION_MODE_FULL
1152 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1153 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1154#endif
1155 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1156 pVCpu->iem.s.enmCpuMode = enmMode;
1157 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1158 pVCpu->iem.s.enmEffAddrMode = enmMode;
1159 if (enmMode != IEMMODE_64BIT)
1160 {
1161 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1162 pVCpu->iem.s.enmEffOpSize = enmMode;
1163 }
1164 else
1165 {
1166 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1167 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1168 }
1169 pVCpu->iem.s.fPrefixes = 0;
1170 pVCpu->iem.s.uRexReg = 0;
1171 pVCpu->iem.s.uRexB = 0;
1172 pVCpu->iem.s.uRexIndex = 0;
1173 pVCpu->iem.s.idxPrefix = 0;
1174 pVCpu->iem.s.uVex3rdReg = 0;
1175 pVCpu->iem.s.uVexLength = 0;
1176 pVCpu->iem.s.fEvexStuff = 0;
1177 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1178#ifdef IEM_WITH_CODE_TLB
1179 pVCpu->iem.s.pbInstrBuf = NULL;
1180 pVCpu->iem.s.offInstrNextByte = 0;
1181 pVCpu->iem.s.offCurInstrStart = 0;
1182# ifdef VBOX_STRICT
1183 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1184 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1185 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1186# endif
1187#else
1188 pVCpu->iem.s.offOpcode = 0;
1189 pVCpu->iem.s.cbOpcode = 0;
1190#endif
1191 pVCpu->iem.s.cActiveMappings = 0;
1192 pVCpu->iem.s.iNextMapping = 0;
1193 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1194 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1195#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1196 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1197 && pCtx->cs.u64Base == 0
1198 && pCtx->cs.u32Limit == UINT32_MAX
1199 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1200 if (!pVCpu->iem.s.fInPatchCode)
1201 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1202#endif
1203
1204#ifdef DBGFTRACE_ENABLED
1205 switch (enmMode)
1206 {
1207 case IEMMODE_64BIT:
1208 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1209 break;
1210 case IEMMODE_32BIT:
1211 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1212 break;
1213 case IEMMODE_16BIT:
1214 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1215 break;
1216 }
1217#endif
1218}
1219
1220
1221/**
1222 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1223 *
1224 * This is mostly a copy of iemInitDecoder.
1225 *
1226 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1227 */
1228DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1229{
1230 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1231
1232 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1233
1234#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1235 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1236 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1237 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1238 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1239 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1240 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1241 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1242 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1243#endif
1244
1245 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1246#ifdef IEM_VERIFICATION_MODE_FULL
1247 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1248 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1249#endif
1250 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1251 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1252 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1253 pVCpu->iem.s.enmEffAddrMode = enmMode;
1254 if (enmMode != IEMMODE_64BIT)
1255 {
1256 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1257 pVCpu->iem.s.enmEffOpSize = enmMode;
1258 }
1259 else
1260 {
1261 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1262 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1263 }
1264 pVCpu->iem.s.fPrefixes = 0;
1265 pVCpu->iem.s.uRexReg = 0;
1266 pVCpu->iem.s.uRexB = 0;
1267 pVCpu->iem.s.uRexIndex = 0;
1268 pVCpu->iem.s.idxPrefix = 0;
1269 pVCpu->iem.s.uVex3rdReg = 0;
1270 pVCpu->iem.s.uVexLength = 0;
1271 pVCpu->iem.s.fEvexStuff = 0;
1272 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1273#ifdef IEM_WITH_CODE_TLB
1274 if (pVCpu->iem.s.pbInstrBuf)
1275 {
1276 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1277 - pVCpu->iem.s.uInstrBufPc;
1278 if (off < pVCpu->iem.s.cbInstrBufTotal)
1279 {
1280 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1281 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1282 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1283 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1284 else
1285 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1286 }
1287 else
1288 {
1289 pVCpu->iem.s.pbInstrBuf = NULL;
1290 pVCpu->iem.s.offInstrNextByte = 0;
1291 pVCpu->iem.s.offCurInstrStart = 0;
1292 pVCpu->iem.s.cbInstrBuf = 0;
1293 pVCpu->iem.s.cbInstrBufTotal = 0;
1294 }
1295 }
1296 else
1297 {
1298 pVCpu->iem.s.offInstrNextByte = 0;
1299 pVCpu->iem.s.offCurInstrStart = 0;
1300 pVCpu->iem.s.cbInstrBuf = 0;
1301 pVCpu->iem.s.cbInstrBufTotal = 0;
1302 }
1303#else
1304 pVCpu->iem.s.cbOpcode = 0;
1305 pVCpu->iem.s.offOpcode = 0;
1306#endif
1307 Assert(pVCpu->iem.s.cActiveMappings == 0);
1308 pVCpu->iem.s.iNextMapping = 0;
1309 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1310 Assert(pVCpu->iem.s.fBypassHandlers == false);
1311#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1312 if (!pVCpu->iem.s.fInPatchCode)
1313 { /* likely */ }
1314 else
1315 {
1316 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1317 && pCtx->cs.u64Base == 0
1318 && pCtx->cs.u32Limit == UINT32_MAX
1319 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1320 if (!pVCpu->iem.s.fInPatchCode)
1321 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1322 }
1323#endif
1324
1325#ifdef DBGFTRACE_ENABLED
1326 switch (enmMode)
1327 {
1328 case IEMMODE_64BIT:
1329 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1330 break;
1331 case IEMMODE_32BIT:
1332 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1333 break;
1334 case IEMMODE_16BIT:
1335 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1336 break;
1337 }
1338#endif
1339}
1340
1341
1342
1343/**
1344 * Prefetch opcodes the first time when starting execution.
1345 *
1346 * @returns Strict VBox status code.
1347 * @param pVCpu The cross context virtual CPU structure of the
1348 * calling thread.
1349 * @param fBypassHandlers Whether to bypass access handlers.
1350 */
1351IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1352{
1353#ifdef IEM_VERIFICATION_MODE_FULL
1354 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1355#endif
1356 iemInitDecoder(pVCpu, fBypassHandlers);
1357
1358#ifdef IEM_WITH_CODE_TLB
1359 /** @todo Do ITLB lookup here. */
1360
1361#else /* !IEM_WITH_CODE_TLB */
1362
1363 /*
1364 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1365 *
1366 * First translate CS:rIP to a physical address.
1367 */
1368 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1369 uint32_t cbToTryRead;
1370 RTGCPTR GCPtrPC;
1371 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1372 {
1373 cbToTryRead = PAGE_SIZE;
1374 GCPtrPC = pCtx->rip;
1375 if (IEM_IS_CANONICAL(GCPtrPC))
1376 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1377 else
1378 return iemRaiseGeneralProtectionFault0(pVCpu);
1379 }
1380 else
1381 {
1382 uint32_t GCPtrPC32 = pCtx->eip;
1383 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1384 if (GCPtrPC32 <= pCtx->cs.u32Limit)
1385 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1386 else
1387 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1388 if (cbToTryRead) { /* likely */ }
1389 else /* overflowed */
1390 {
1391 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1392 cbToTryRead = UINT32_MAX;
1393 }
1394 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1395 Assert(GCPtrPC <= UINT32_MAX);
1396 }
1397
1398# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1399 /* Allow interpretation of patch manager code blocks since they can for
1400 instance throw #PFs for perfectly good reasons. */
1401 if (pVCpu->iem.s.fInPatchCode)
1402 {
1403 size_t cbRead = 0;
1404 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1405 AssertRCReturn(rc, rc);
1406 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1407 return VINF_SUCCESS;
1408 }
1409# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1410
1411 RTGCPHYS GCPhys;
1412 uint64_t fFlags;
1413 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1414 if (RT_SUCCESS(rc)) { /* probable */ }
1415 else
1416 {
1417 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1418 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1419 }
1420 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1421 else
1422 {
1423 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1424 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1425 }
1426 if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1427 else
1428 {
1429 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1430 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1431 }
1432 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1433 /** @todo Check reserved bits and such stuff. PGM is better at doing
1434 * that, so do it when implementing the guest virtual address
1435 * TLB... */
1436
1437# ifdef IEM_VERIFICATION_MODE_FULL
1438 /*
1439 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1440 * instruction.
1441 */
1442 /** @todo optimize this differently by not using PGMPhysRead. */
1443 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1444 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1445 if ( offPrevOpcodes < cbOldOpcodes
1446 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1447 {
1448 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1449 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1450 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1451 pVCpu->iem.s.cbOpcode = cbNew;
1452 return VINF_SUCCESS;
1453 }
1454# endif
1455
1456 /*
1457 * Read the bytes at this address.
1458 */
1459 PVM pVM = pVCpu->CTX_SUFF(pVM);
1460# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1461 size_t cbActual;
1462 if ( PATMIsEnabled(pVM)
1463 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1464 {
1465 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1466 Assert(cbActual > 0);
1467 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1468 }
1469 else
1470# endif
1471 {
1472 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1473 if (cbToTryRead > cbLeftOnPage)
1474 cbToTryRead = cbLeftOnPage;
1475 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1476 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1477
1478 if (!pVCpu->iem.s.fBypassHandlers)
1479 {
1480 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1481 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1482 { /* likely */ }
1483 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1484 {
1485 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1486 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1487 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1488 }
1489 else
1490 {
1491 Log((RT_SUCCESS(rcStrict)
1492 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1493 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1494                  GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1495 return rcStrict;
1496 }
1497 }
1498 else
1499 {
1500 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1501 if (RT_SUCCESS(rc))
1502 { /* likely */ }
1503 else
1504 {
1505 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1506                  GCPtrPC, GCPhys, cbToTryRead, rc));
1507 return rc;
1508 }
1509 }
1510 pVCpu->iem.s.cbOpcode = cbToTryRead;
1511 }
1512#endif /* !IEM_WITH_CODE_TLB */
1513 return VINF_SUCCESS;
1514}
1515
1516
1517/**
1518 * Invalidates the IEM TLBs.
1519 *
1520 * This is called internally as well as by PGM when moving GC mappings.
1521 *
1523 * @param pVCpu The cross context virtual CPU structure of the calling
1524 * thread.
1525 * @param fVmm Set when PGM calls us with a remapping.
1526 */
1527VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1528{
1529#ifdef IEM_WITH_CODE_TLB
1530 pVCpu->iem.s.cbInstrBufTotal = 0;
1531 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1532 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1533 { /* very likely */ }
1534 else
1535 {
1536 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1537 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1538 while (i-- > 0)
1539 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1540 }
1541#endif
1542
1543#ifdef IEM_WITH_DATA_TLB
1544 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1545 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1546 { /* very likely */ }
1547 else
1548 {
1549 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1550 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1551 while (i-- > 0)
1552 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1553 }
1554#endif
1555 NOREF(pVCpu); NOREF(fVmm);
1556}
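/*
 * Illustrative sketch (not part of the build): the revision bump above is what
 * invalidates the TLB lazily.  Lookup tags are always formed as
 * (GCPtr >> X86_PAGE_SHIFT) | uTlbRevision, so once the revision has moved on
 * no previously stored uTag can compare equal again; the explicit entry wipe is
 * only needed when the revision counter wraps back to zero.  The helper name
 * below is hypothetical and only shows the lookup side of the scheme.
 */
#if 0
DECLINLINE(bool) iemTlbSketchDataLookupHits(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    uint64_t const uTag = (GCPtr >> X86_PAGE_SHIFT) | pVCpu->iem.s.DataTlb.uTlbRevision;
    return pVCpu->iem.s.DataTlb.aEntries[(uint8_t)uTag].uTag == uTag;
}
#endif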
1557
1558
1559/**
1560 * Invalidates a page in the TLBs.
1561 *
1562 * @param pVCpu The cross context virtual CPU structure of the calling
1563 * thread.
1564 * @param GCPtr The address of the page to invalidate
1565 */
1566VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1567{
1568#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1569 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1570 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1571 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1572 uintptr_t idx = (uint8_t)GCPtr;
1573
1574# ifdef IEM_WITH_CODE_TLB
1575 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1576 {
1577 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1578 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1579 pVCpu->iem.s.cbInstrBufTotal = 0;
1580 }
1581# endif
1582
1583# ifdef IEM_WITH_DATA_TLB
1584 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1585 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1586# endif
1587#else
1588 NOREF(pVCpu); NOREF(GCPtr);
1589#endif
1590}
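/*
 * Worked example for the lookup math above (illustrative only): for
 * GCPtr = 0x00007fff12345000 the shifted page number is 0x7fff12345, the entry
 * index is its low byte (0x45), and the tag that must match is
 * 0x7fff12345 | uTlbRevision.  The OR-based tagging assumes the revision
 * increment only touches bits above the page number bits, so a stale revision
 * or a different page can never produce a false hit.
 */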
1591
1592
1593/**
1594 * Invalidates the host physical aspects of the IEM TLBs.
1595 *
1596 * This is called internally as well as by PGM when moving GC mappings.
1597 *
1598 * @param pVCpu The cross context virtual CPU structure of the calling
1599 * thread.
1600 */
1601VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1602{
1603#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1604    /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1605
1606# ifdef IEM_WITH_CODE_TLB
1607 pVCpu->iem.s.cbInstrBufTotal = 0;
1608# endif
1609 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1610 if (uTlbPhysRev != 0)
1611 {
1612 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1613 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1614 }
1615 else
1616 {
1617 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1618 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1619
1620 unsigned i;
1621# ifdef IEM_WITH_CODE_TLB
1622 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1623 while (i-- > 0)
1624 {
1625 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1626 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1627 }
1628# endif
1629# ifdef IEM_WITH_DATA_TLB
1630 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1631 while (i-- > 0)
1632 {
1633 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1634 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1635 }
1636# endif
1637 }
1638#else
1639 NOREF(pVCpu);
1640#endif
1641}
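/*
 * Illustrative sketch (not part of the build): consumers compare the physical
 * revision bits stored in fFlagsAndPhysRev against the current uTlbPhysRev,
 * just like the code fetch path further down does, and re-query PGM when they
 * differ.  Bumping uTlbPhysRev above is therefore enough to make every entry
 * refresh its pbMappingR3 and read/write status on next use.  The helper name
 * is hypothetical.
 */
#if 0
DECLINLINE(bool) iemTlbSketchPhysInfoIsCurrent(PVMCPU pVCpu, PIEMTLBENTRY pTlbe)
{
    return (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev;
}
#endif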
1642
1643
1644/**
1645 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
1646 *
1647 * This is called internally as well as by PGM when moving GC mappings.
1648 *
1649 * @param pVM The cross context VM structure.
1650 *
1651 * @remarks Caller holds the PGM lock.
1652 */
1653VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1654{
1655 RT_NOREF_PV(pVM);
1656}
1657
1658#ifdef IEM_WITH_CODE_TLB
1659
1660/**
1661 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception and
1662 * longjmp'ing on failure.
1663 *
1664 * We end up here for a number of reasons:
1665 * - pbInstrBuf isn't yet initialized.
1666 *      - Advancing beyond the buffer boundary (e.g. cross page).
1667 * - Advancing beyond the CS segment limit.
1668 * - Fetching from non-mappable page (e.g. MMIO).
1669 *
1670 * @param pVCpu The cross context virtual CPU structure of the
1671 * calling thread.
1672 * @param pvDst Where to return the bytes.
1673 * @param cbDst Number of bytes to read.
1674 *
1675 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1676 */
1677IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1678{
1679#ifdef IN_RING3
1680//__debugbreak();
1681 for (;;)
1682 {
1683 Assert(cbDst <= 8);
1684 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1685
1686 /*
1687 * We might have a partial buffer match, deal with that first to make the
1688 * rest simpler. This is the first part of the cross page/buffer case.
1689 */
1690 if (pVCpu->iem.s.pbInstrBuf != NULL)
1691 {
1692 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1693 {
1694 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1695 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1696 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1697
1698 cbDst -= cbCopy;
1699 pvDst = (uint8_t *)pvDst + cbCopy;
1700 offBuf += cbCopy;
1701                pVCpu->iem.s.offInstrNextByte += cbCopy;
1702 }
1703 }
1704
1705 /*
1706 * Check segment limit, figuring how much we're allowed to access at this point.
1707 *
1708 * We will fault immediately if RIP is past the segment limit / in non-canonical
1709 * territory. If we do continue, there are one or more bytes to read before we
1710 * end up in trouble and we need to do that first before faulting.
1711 */
1712 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1713 RTGCPTR GCPtrFirst;
1714 uint32_t cbMaxRead;
1715 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1716 {
1717 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1718 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1719 { /* likely */ }
1720 else
1721 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1722 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1723 }
1724 else
1725 {
1726 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1727 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1728 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1729 { /* likely */ }
1730 else
1731 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1732 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1733 if (cbMaxRead != 0)
1734 { /* likely */ }
1735 else
1736 {
1737 /* Overflowed because address is 0 and limit is max. */
1738 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1739 cbMaxRead = X86_PAGE_SIZE;
1740 }
1741 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1742 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1743 if (cbMaxRead2 < cbMaxRead)
1744 cbMaxRead = cbMaxRead2;
1745 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1746 }
1747
1748 /*
1749 * Get the TLB entry for this piece of code.
1750 */
1751 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1752 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1753 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1754 if (pTlbe->uTag == uTag)
1755 {
1756 /* likely when executing lots of code, otherwise unlikely */
1757# ifdef VBOX_WITH_STATISTICS
1758 pVCpu->iem.s.CodeTlb.cTlbHits++;
1759# endif
1760 }
1761 else
1762 {
1763 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1764# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1765 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1766 {
1767 pTlbe->uTag = uTag;
1768 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1769 | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1770 pTlbe->GCPhys = NIL_RTGCPHYS;
1771 pTlbe->pbMappingR3 = NULL;
1772 }
1773 else
1774# endif
1775 {
1776 RTGCPHYS GCPhys;
1777 uint64_t fFlags;
1778 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1779 if (RT_FAILURE(rc))
1780 {
1781 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1782 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1783 }
1784
1785 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1786 pTlbe->uTag = uTag;
1787 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1788 pTlbe->GCPhys = GCPhys;
1789 pTlbe->pbMappingR3 = NULL;
1790 }
1791 }
1792
1793 /*
1794 * Check TLB page table level access flags.
1795 */
1796 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1797 {
1798 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1799 {
1800 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1801 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1802 }
1803 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1804 {
1805 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1806 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1807 }
1808 }
1809
1810# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1811 /*
1812 * Allow interpretation of patch manager code blocks since they can for
1813 * instance throw #PFs for perfectly good reasons.
1814 */
1815 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1816 { /* no unlikely */ }
1817 else
1818 {
1819            /** @todo This could be optimized a little in ring-3 if we liked. */
1820 size_t cbRead = 0;
1821 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1822 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1823 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1824 return;
1825 }
1826# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1827
1828 /*
1829 * Look up the physical page info if necessary.
1830 */
1831 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1832 { /* not necessary */ }
1833 else
1834 {
1835 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1836 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1837 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1838 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1839 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1840 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1841 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1842 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1843 }
1844
1845# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1846 /*
1847 * Try do a direct read using the pbMappingR3 pointer.
1848 */
1849 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1850 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1851 {
1852 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1853 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1854 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1855 {
1856 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1857 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1858 }
1859 else
1860 {
1861 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1862 Assert(cbInstr < cbMaxRead);
1863 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1864 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1865 }
1866 if (cbDst <= cbMaxRead)
1867 {
1868 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1869 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1870 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1871 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1872 return;
1873 }
1874 pVCpu->iem.s.pbInstrBuf = NULL;
1875
1876 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1877 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1878 }
1879 else
1880# endif
1881#if 0
1882 /*
1883         * If there is no special read handling, we can read a bit more and
1884 * put it in the prefetch buffer.
1885 */
1886 if ( cbDst < cbMaxRead
1887 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1888 {
1889 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1890 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1891 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1892 { /* likely */ }
1893 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1894 {
1895 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1896 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1897 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1898                AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1899 }
1900 else
1901 {
1902 Log((RT_SUCCESS(rcStrict)
1903 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1904 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1905 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1906 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1907 }
1908 }
1909 /*
1910 * Special read handling, so only read exactly what's needed.
1911 * This is a highly unlikely scenario.
1912 */
1913 else
1914#endif
1915 {
1916 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1917 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1918 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1919 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1920 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1921 { /* likely */ }
1922 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1923 {
1924 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1925                 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1926 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1927 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1928 }
1929 else
1930 {
1931 Log((RT_SUCCESS(rcStrict)
1932 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1933 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1934                  GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1935 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1936 }
1937 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1938 if (cbToRead == cbDst)
1939 return;
1940 }
1941
1942 /*
1943 * More to read, loop.
1944 */
1945 cbDst -= cbMaxRead;
1946 pvDst = (uint8_t *)pvDst + cbMaxRead;
1947 }
1948#else
1949 RT_NOREF(pvDst, cbDst);
1950 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1951#endif
1952}
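/*
 * Informal summary of the buffer bookkeeping established above:
 * pbInstrBuf/uInstrBufPc describe the host mapping and guest address of the
 * page currently being decoded from, offInstrNextByte is the decode cursor
 * within that window, offCurInstrStart marks where the current instruction
 * begins relative to the page (it can go negative when the instruction started
 * on the previous page), and cbInstrBuf is clipped so that at most 15 bytes of
 * the current instruction are served from the inline fast path before this
 * function is re-entered.
 */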
1953
1954#else
1955
1956/**
1957 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1958 * exception if it fails.
1959 *
1960 * @returns Strict VBox status code.
1961 * @param pVCpu The cross context virtual CPU structure of the
1962 * calling thread.
1963 * @param   cbMin               The minimum number of bytes relative to offOpcode
1964 * that must be read.
1965 */
1966IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1967{
1968 /*
1969 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1970 *
1971 * First translate CS:rIP to a physical address.
1972 */
1973 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1974 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1975 uint32_t cbToTryRead;
1976 RTGCPTR GCPtrNext;
1977 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1978 {
1979 cbToTryRead = PAGE_SIZE;
1980 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1981 if (!IEM_IS_CANONICAL(GCPtrNext))
1982 return iemRaiseGeneralProtectionFault0(pVCpu);
1983 }
1984 else
1985 {
1986 uint32_t GCPtrNext32 = pCtx->eip;
1987 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1988 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1989 if (GCPtrNext32 > pCtx->cs.u32Limit)
1990 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1991 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1992 if (!cbToTryRead) /* overflowed */
1993 {
1994 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1995 cbToTryRead = UINT32_MAX;
1996 /** @todo check out wrapping around the code segment. */
1997 }
1998 if (cbToTryRead < cbMin - cbLeft)
1999 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2000 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
2001 }
2002
2003 /* Only read up to the end of the page, and make sure we don't read more
2004 than the opcode buffer can hold. */
2005 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
2006 if (cbToTryRead > cbLeftOnPage)
2007 cbToTryRead = cbLeftOnPage;
2008 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
2009 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
2010/** @todo r=bird: Convert assertion into undefined opcode exception? */
2011 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
2012
2013# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2014 /* Allow interpretation of patch manager code blocks since they can for
2015 instance throw #PFs for perfectly good reasons. */
2016 if (pVCpu->iem.s.fInPatchCode)
2017 {
2018 size_t cbRead = 0;
2019 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2020 AssertRCReturn(rc, rc);
2021 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2022 return VINF_SUCCESS;
2023 }
2024# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2025
2026 RTGCPHYS GCPhys;
2027 uint64_t fFlags;
2028 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2029 if (RT_FAILURE(rc))
2030 {
2031 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2032 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2033 }
2034 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2035 {
2036 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2037 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2038 }
2039 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
2040 {
2041 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2042 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2043 }
2044 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2045 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2046 /** @todo Check reserved bits and such stuff. PGM is better at doing
2047 * that, so do it when implementing the guest virtual address
2048 * TLB... */
2049
2050 /*
2051 * Read the bytes at this address.
2052 *
2053 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2054 * and since PATM should only patch the start of an instruction there
2055 * should be no need to check again here.
2056 */
2057 if (!pVCpu->iem.s.fBypassHandlers)
2058 {
2059 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2060 cbToTryRead, PGMACCESSORIGIN_IEM);
2061 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2062 { /* likely */ }
2063 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2064 {
2065 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2066                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2067 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2068 }
2069 else
2070 {
2071 Log((RT_SUCCESS(rcStrict)
2072 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2073 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2074                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2075 return rcStrict;
2076 }
2077 }
2078 else
2079 {
2080 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2081 if (RT_SUCCESS(rc))
2082 { /* likely */ }
2083 else
2084 {
2085 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2086 return rc;
2087 }
2088 }
2089 pVCpu->iem.s.cbOpcode += cbToTryRead;
2090 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2091
2092 return VINF_SUCCESS;
2093}
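/*
 * Informal note on the opcode buffer protocol used by this non-TLB path:
 * offOpcode is the decoder's read cursor into abOpcode[] and cbOpcode the
 * number of bytes fetched so far.  The slow-path getters below invoke this
 * function when offOpcode plus the operand size would run past cbOpcode, and
 * on success it appends at abOpcode[cbOpcode] and grows cbOpcode without
 * moving offOpcode.
 */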
2094
2095#endif /* !IEM_WITH_CODE_TLB */
2096#ifndef IEM_WITH_SETJMP
2097
2098/**
2099 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2100 *
2101 * @returns Strict VBox status code.
2102 * @param pVCpu The cross context virtual CPU structure of the
2103 * calling thread.
2104 * @param pb Where to return the opcode byte.
2105 */
2106DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2107{
2108 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2109 if (rcStrict == VINF_SUCCESS)
2110 {
2111 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2112 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2113 pVCpu->iem.s.offOpcode = offOpcode + 1;
2114 }
2115 else
2116 *pb = 0;
2117 return rcStrict;
2118}
2119
2120
2121/**
2122 * Fetches the next opcode byte.
2123 *
2124 * @returns Strict VBox status code.
2125 * @param pVCpu The cross context virtual CPU structure of the
2126 * calling thread.
2127 * @param pu8 Where to return the opcode byte.
2128 */
2129DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2130{
2131 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2132 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2133 {
2134 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2135 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2136 return VINF_SUCCESS;
2137 }
2138 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2139}
2140
2141#else /* IEM_WITH_SETJMP */
2142
2143/**
2144 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2145 *
2146 * @returns The opcode byte.
2147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2148 */
2149DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2150{
2151# ifdef IEM_WITH_CODE_TLB
2152 uint8_t u8;
2153 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2154 return u8;
2155# else
2156 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2157 if (rcStrict == VINF_SUCCESS)
2158 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2159 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2160# endif
2161}
2162
2163
2164/**
2165 * Fetches the next opcode byte, longjmp on error.
2166 *
2167 * @returns The opcode byte.
2168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2169 */
2170DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2171{
2172# ifdef IEM_WITH_CODE_TLB
2173 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2174 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2175 if (RT_LIKELY( pbBuf != NULL
2176 && offBuf < pVCpu->iem.s.cbInstrBuf))
2177 {
2178 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2179 return pbBuf[offBuf];
2180 }
2181# else
2182 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2183 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2184 {
2185 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2186 return pVCpu->iem.s.abOpcode[offOpcode];
2187 }
2188# endif
2189 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2190}
2191
2192#endif /* IEM_WITH_SETJMP */
2193
2194/**
2195 * Fetches the next opcode byte, returns automatically on failure.
2196 *
2197 * @param a_pu8 Where to return the opcode byte.
2198 * @remark Implicitly references pVCpu.
2199 */
2200#ifndef IEM_WITH_SETJMP
2201# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2202 do \
2203 { \
2204 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2205 if (rcStrict2 == VINF_SUCCESS) \
2206 { /* likely */ } \
2207 else \
2208 return rcStrict2; \
2209 } while (0)
2210#else
2211# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2212#endif /* IEM_WITH_SETJMP */
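/*
 * Illustrative use of IEM_OPCODE_GET_NEXT_U8 (hypothetical helper, not part of
 * the real decoder): the macro hides the two error models, returning the
 * strict status in the non-setjmp build and longjmp'ing in the setjmp build,
 * so callers look the same in both configurations.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemSketchFetchModRmByte(PVMCPU pVCpu, uint8_t *pbRm)
{
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm); /* returns or longjmps on fetch failure */
    *pbRm = bRm;
    return VINF_SUCCESS;
}
#endif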
2213
2214
2215#ifndef IEM_WITH_SETJMP
2216/**
2217 * Fetches the next signed byte from the opcode stream.
2218 *
2219 * @returns Strict VBox status code.
2220 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2221 * @param pi8 Where to return the signed byte.
2222 */
2223DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2224{
2225 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2226}
2227#endif /* !IEM_WITH_SETJMP */
2228
2229
2230/**
2231 * Fetches the next signed byte from the opcode stream, returning automatically
2232 * on failure.
2233 *
2234 * @param a_pi8 Where to return the signed byte.
2235 * @remark Implicitly references pVCpu.
2236 */
2237#ifndef IEM_WITH_SETJMP
2238# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2239 do \
2240 { \
2241 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2242 if (rcStrict2 != VINF_SUCCESS) \
2243 return rcStrict2; \
2244 } while (0)
2245#else /* IEM_WITH_SETJMP */
2246# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2247
2248#endif /* IEM_WITH_SETJMP */
2249
2250#ifndef IEM_WITH_SETJMP
2251
2252/**
2253 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2254 *
2255 * @returns Strict VBox status code.
2256 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2257 * @param   pu16                Where to return the sign-extended word.
2258 */
2259DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2260{
2261 uint8_t u8;
2262 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2263 if (rcStrict == VINF_SUCCESS)
2264 *pu16 = (int8_t)u8;
2265 return rcStrict;
2266}
2267
2268
2269/**
2270 * Fetches the next signed byte from the opcode stream, extending it to
2271 * unsigned 16-bit.
2272 *
2273 * @returns Strict VBox status code.
2274 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2275 * @param pu16 Where to return the unsigned word.
2276 */
2277DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2278{
2279 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2280 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2281 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2282
2283 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2284 pVCpu->iem.s.offOpcode = offOpcode + 1;
2285 return VINF_SUCCESS;
2286}
2287
2288#endif /* !IEM_WITH_SETJMP */
2289
2290/**
2291 * Fetches the next signed byte from the opcode stream, sign-extending it to
2292 * a word and returning automatically on failure.
2293 *
2294 * @param a_pu16 Where to return the word.
2295 * @remark Implicitly references pVCpu.
2296 */
2297#ifndef IEM_WITH_SETJMP
2298# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2299 do \
2300 { \
2301 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2302 if (rcStrict2 != VINF_SUCCESS) \
2303 return rcStrict2; \
2304 } while (0)
2305#else
2306# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2307#endif
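/*
 * Worked example of the sign extension above (illustrative): an immediate byte
 * of 0xfe is read as int8_t -2 and widens to 0xfffe in the uint16_t
 * destination, while 0x7f stays 0x007f.  This is the widening used for
 * sign-extended 8-bit immediates and displacements at 16-bit operand size.
 */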
2308
2309#ifndef IEM_WITH_SETJMP
2310
2311/**
2312 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2313 *
2314 * @returns Strict VBox status code.
2315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2316 * @param pu32 Where to return the opcode dword.
2317 */
2318DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2319{
2320 uint8_t u8;
2321 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2322 if (rcStrict == VINF_SUCCESS)
2323 *pu32 = (int8_t)u8;
2324 return rcStrict;
2325}
2326
2327
2328/**
2329 * Fetches the next signed byte from the opcode stream, extending it to
2330 * unsigned 32-bit.
2331 *
2332 * @returns Strict VBox status code.
2333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2334 * @param pu32 Where to return the unsigned dword.
2335 */
2336DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2337{
2338 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2339 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2340 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2341
2342 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2343 pVCpu->iem.s.offOpcode = offOpcode + 1;
2344 return VINF_SUCCESS;
2345}
2346
2347#endif /* !IEM_WITH_SETJMP */
2348
2349/**
2350 * Fetches the next signed byte from the opcode stream, sign-extending it to
2351 * a double word and returning automatically on failure.
2352 *
2353 * @param   a_pu32              Where to return the double word.
2354 * @remark Implicitly references pVCpu.
2355 */
2356#ifndef IEM_WITH_SETJMP
2357# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2358 do \
2359 { \
2360 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2361 if (rcStrict2 != VINF_SUCCESS) \
2362 return rcStrict2; \
2363 } while (0)
2364#else
2365# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2366#endif
2367
2368#ifndef IEM_WITH_SETJMP
2369
2370/**
2371 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2372 *
2373 * @returns Strict VBox status code.
2374 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2375 * @param pu64 Where to return the opcode qword.
2376 */
2377DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2378{
2379 uint8_t u8;
2380 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2381 if (rcStrict == VINF_SUCCESS)
2382 *pu64 = (int8_t)u8;
2383 return rcStrict;
2384}
2385
2386
2387/**
2388 * Fetches the next signed byte from the opcode stream, extending it to
2389 * unsigned 64-bit.
2390 *
2391 * @returns Strict VBox status code.
2392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2393 * @param pu64 Where to return the unsigned qword.
2394 */
2395DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2396{
2397 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2398 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2399 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2400
2401 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2402 pVCpu->iem.s.offOpcode = offOpcode + 1;
2403 return VINF_SUCCESS;
2404}
2405
2406#endif /* !IEM_WITH_SETJMP */
2407
2408
2409/**
2410 * Fetches the next signed byte from the opcode stream, sign-extending it to
2411 * a quad word and returning automatically on failure.
2412 *
2413 * @param   a_pu64              Where to return the quad word.
2414 * @remark Implicitly references pVCpu.
2415 */
2416#ifndef IEM_WITH_SETJMP
2417# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2418 do \
2419 { \
2420 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2421 if (rcStrict2 != VINF_SUCCESS) \
2422 return rcStrict2; \
2423 } while (0)
2424#else
2425# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2426#endif
2427
2428
2429#ifndef IEM_WITH_SETJMP
2430
2431/**
2432 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2433 *
2434 * @returns Strict VBox status code.
2435 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2436 * @param pu16 Where to return the opcode word.
2437 */
2438DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2439{
2440 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2441 if (rcStrict == VINF_SUCCESS)
2442 {
2443 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2444# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2445 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2446# else
2447 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2448# endif
2449 pVCpu->iem.s.offOpcode = offOpcode + 2;
2450 }
2451 else
2452 *pu16 = 0;
2453 return rcStrict;
2454}
2455
2456
2457/**
2458 * Fetches the next opcode word.
2459 *
2460 * @returns Strict VBox status code.
2461 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2462 * @param pu16 Where to return the opcode word.
2463 */
2464DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2465{
2466 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2467 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2468 {
2469 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2470# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2471 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2472# else
2473 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2474# endif
2475 return VINF_SUCCESS;
2476 }
2477 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2478}
2479
2480#else /* IEM_WITH_SETJMP */
2481
2482/**
2483 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
2484 *
2485 * @returns The opcode word.
2486 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2487 */
2488DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2489{
2490# ifdef IEM_WITH_CODE_TLB
2491 uint16_t u16;
2492 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2493 return u16;
2494# else
2495 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2496 if (rcStrict == VINF_SUCCESS)
2497 {
2498 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2499 pVCpu->iem.s.offOpcode += 2;
2500# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2501 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2502# else
2503 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2504# endif
2505 }
2506 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2507# endif
2508}
2509
2510
2511/**
2512 * Fetches the next opcode word, longjmp on error.
2513 *
2514 * @returns The opcode word.
2515 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2516 */
2517DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2518{
2519# ifdef IEM_WITH_CODE_TLB
2520 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2521 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2522 if (RT_LIKELY( pbBuf != NULL
2523 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2524 {
2525 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2526# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2527 return *(uint16_t const *)&pbBuf[offBuf];
2528# else
2529 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2530# endif
2531 }
2532# else
2533 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2534 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2535 {
2536 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2537# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2538 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2539# else
2540 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2541# endif
2542 }
2543# endif
2544 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2545}
2546
2547#endif /* IEM_WITH_SETJMP */
2548
2549
2550/**
2551 * Fetches the next opcode word, returns automatically on failure.
2552 *
2553 * @param a_pu16 Where to return the opcode word.
2554 * @remark Implicitly references pVCpu.
2555 */
2556#ifndef IEM_WITH_SETJMP
2557# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2558 do \
2559 { \
2560 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2561 if (rcStrict2 != VINF_SUCCESS) \
2562 return rcStrict2; \
2563 } while (0)
2564#else
2565# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2566#endif
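/*
 * Worked example for the byte order handling above (illustrative): the opcode
 * stream is little endian, so for the byte sequence 0x34 0x12 both the
 * RT_MAKE_U16(abOpcode[off], abOpcode[off + 1]) form and the unaligned
 * uint16_t load yield 0x1234; IEM_USE_UNALIGNED_DATA_ACCESS merely selects the
 * direct load on hosts where that is safe.
 */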
2567
2568#ifndef IEM_WITH_SETJMP
2569
2570/**
2571 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2572 *
2573 * @returns Strict VBox status code.
2574 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2575 * @param pu32 Where to return the opcode double word.
2576 */
2577DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2578{
2579 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2580 if (rcStrict == VINF_SUCCESS)
2581 {
2582 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2583 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2584 pVCpu->iem.s.offOpcode = offOpcode + 2;
2585 }
2586 else
2587 *pu32 = 0;
2588 return rcStrict;
2589}
2590
2591
2592/**
2593 * Fetches the next opcode word, zero extending it to a double word.
2594 *
2595 * @returns Strict VBox status code.
2596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2597 * @param pu32 Where to return the opcode double word.
2598 */
2599DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2600{
2601 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2602 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2603 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2604
2605 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2606 pVCpu->iem.s.offOpcode = offOpcode + 2;
2607 return VINF_SUCCESS;
2608}
2609
2610#endif /* !IEM_WITH_SETJMP */
2611
2612
2613/**
2614 * Fetches the next opcode word and zero extends it to a double word, returns
2615 * automatically on failure.
2616 *
2617 * @param a_pu32 Where to return the opcode double word.
2618 * @remark Implicitly references pVCpu.
2619 */
2620#ifndef IEM_WITH_SETJMP
2621# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2622 do \
2623 { \
2624 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2625 if (rcStrict2 != VINF_SUCCESS) \
2626 return rcStrict2; \
2627 } while (0)
2628#else
2629# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2630#endif
2631
2632#ifndef IEM_WITH_SETJMP
2633
2634/**
2635 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2636 *
2637 * @returns Strict VBox status code.
2638 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2639 * @param pu64 Where to return the opcode quad word.
2640 */
2641DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2642{
2643 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2644 if (rcStrict == VINF_SUCCESS)
2645 {
2646 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2647 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2648 pVCpu->iem.s.offOpcode = offOpcode + 2;
2649 }
2650 else
2651 *pu64 = 0;
2652 return rcStrict;
2653}
2654
2655
2656/**
2657 * Fetches the next opcode word, zero extending it to a quad word.
2658 *
2659 * @returns Strict VBox status code.
2660 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2661 * @param pu64 Where to return the opcode quad word.
2662 */
2663DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2664{
2665 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2666 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2667 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2668
2669 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2670 pVCpu->iem.s.offOpcode = offOpcode + 2;
2671 return VINF_SUCCESS;
2672}
2673
2674#endif /* !IEM_WITH_SETJMP */
2675
2676/**
2677 * Fetches the next opcode word and zero extends it to a quad word, returns
2678 * automatically on failure.
2679 *
2680 * @param a_pu64 Where to return the opcode quad word.
2681 * @remark Implicitly references pVCpu.
2682 */
2683#ifndef IEM_WITH_SETJMP
2684# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2685 do \
2686 { \
2687 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2688 if (rcStrict2 != VINF_SUCCESS) \
2689 return rcStrict2; \
2690 } while (0)
2691#else
2692# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2693#endif
2694
2695
2696#ifndef IEM_WITH_SETJMP
2697/**
2698 * Fetches the next signed word from the opcode stream.
2699 *
2700 * @returns Strict VBox status code.
2701 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2702 * @param pi16 Where to return the signed word.
2703 */
2704DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2705{
2706 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2707}
2708#endif /* !IEM_WITH_SETJMP */
2709
2710
2711/**
2712 * Fetches the next signed word from the opcode stream, returning automatically
2713 * on failure.
2714 *
2715 * @param a_pi16 Where to return the signed word.
2716 * @remark Implicitly references pVCpu.
2717 */
2718#ifndef IEM_WITH_SETJMP
2719# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2720 do \
2721 { \
2722 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2723 if (rcStrict2 != VINF_SUCCESS) \
2724 return rcStrict2; \
2725 } while (0)
2726#else
2727# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2728#endif
2729
2730#ifndef IEM_WITH_SETJMP
2731
2732/**
2733 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2734 *
2735 * @returns Strict VBox status code.
2736 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2737 * @param pu32 Where to return the opcode dword.
2738 */
2739DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2740{
2741 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2742 if (rcStrict == VINF_SUCCESS)
2743 {
2744 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2745# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2746 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2747# else
2748 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2749 pVCpu->iem.s.abOpcode[offOpcode + 1],
2750 pVCpu->iem.s.abOpcode[offOpcode + 2],
2751 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2752# endif
2753 pVCpu->iem.s.offOpcode = offOpcode + 4;
2754 }
2755 else
2756 *pu32 = 0;
2757 return rcStrict;
2758}
2759
2760
2761/**
2762 * Fetches the next opcode dword.
2763 *
2764 * @returns Strict VBox status code.
2765 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2766 * @param pu32 Where to return the opcode double word.
2767 */
2768DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2769{
2770 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2771 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2772 {
2773 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2774# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2775 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2776# else
2777 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2778 pVCpu->iem.s.abOpcode[offOpcode + 1],
2779 pVCpu->iem.s.abOpcode[offOpcode + 2],
2780 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2781# endif
2782 return VINF_SUCCESS;
2783 }
2784 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2785}
2786
2787#else  /* IEM_WITH_SETJMP */
2788
2789/**
2790 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2791 *
2792 * @returns The opcode dword.
2793 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2794 */
2795DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2796{
2797# ifdef IEM_WITH_CODE_TLB
2798 uint32_t u32;
2799 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2800 return u32;
2801# else
2802 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2803 if (rcStrict == VINF_SUCCESS)
2804 {
2805 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2806 pVCpu->iem.s.offOpcode = offOpcode + 4;
2807# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2808 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2809# else
2810 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2811 pVCpu->iem.s.abOpcode[offOpcode + 1],
2812 pVCpu->iem.s.abOpcode[offOpcode + 2],
2813 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2814# endif
2815 }
2816 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2817# endif
2818}
2819
2820
2821/**
2822 * Fetches the next opcode dword, longjmp on error.
2823 *
2824 * @returns The opcode dword.
2825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2826 */
2827DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2828{
2829# ifdef IEM_WITH_CODE_TLB
2830 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2831 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2832 if (RT_LIKELY( pbBuf != NULL
2833 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2834 {
2835 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2836# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2837 return *(uint32_t const *)&pbBuf[offBuf];
2838# else
2839 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2840 pbBuf[offBuf + 1],
2841 pbBuf[offBuf + 2],
2842 pbBuf[offBuf + 3]);
2843# endif
2844 }
2845# else
2846 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2847 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2848 {
2849 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2850# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2851 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2852# else
2853 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2854 pVCpu->iem.s.abOpcode[offOpcode + 1],
2855 pVCpu->iem.s.abOpcode[offOpcode + 2],
2856 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2857# endif
2858 }
2859# endif
2860 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2861}
2862
2863#endif /* IEM_WITH_SETJMP */
2864
2865
2866/**
2867 * Fetches the next opcode dword, returns automatically on failure.
2868 *
2869 * @param a_pu32 Where to return the opcode dword.
2870 * @remark Implicitly references pVCpu.
2871 */
2872#ifndef IEM_WITH_SETJMP
2873# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2874 do \
2875 { \
2876 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2877 if (rcStrict2 != VINF_SUCCESS) \
2878 return rcStrict2; \
2879 } while (0)
2880#else
2881# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2882#endif
2883
2884#ifndef IEM_WITH_SETJMP
2885
2886/**
2887 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2888 *
2889 * @returns Strict VBox status code.
2890 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2891 * @param   pu64                Where to return the opcode quad word.
2892 */
2893DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2894{
2895 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2896 if (rcStrict == VINF_SUCCESS)
2897 {
2898 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2899 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2900 pVCpu->iem.s.abOpcode[offOpcode + 1],
2901 pVCpu->iem.s.abOpcode[offOpcode + 2],
2902 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2903 pVCpu->iem.s.offOpcode = offOpcode + 4;
2904 }
2905 else
2906 *pu64 = 0;
2907 return rcStrict;
2908}
2909
2910
2911/**
2912 * Fetches the next opcode dword, zero extending it to a quad word.
2913 *
2914 * @returns Strict VBox status code.
2915 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2916 * @param pu64 Where to return the opcode quad word.
2917 */
2918DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2919{
2920 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2921 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2922 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2923
2924 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2925 pVCpu->iem.s.abOpcode[offOpcode + 1],
2926 pVCpu->iem.s.abOpcode[offOpcode + 2],
2927 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2928 pVCpu->iem.s.offOpcode = offOpcode + 4;
2929 return VINF_SUCCESS;
2930}
2931
2932#endif /* !IEM_WITH_SETJMP */
2933
2934
2935/**
2936 * Fetches the next opcode dword and zero extends it to a quad word, returns
2937 * automatically on failure.
2938 *
2939 * @param a_pu64 Where to return the opcode quad word.
2940 * @remark Implicitly references pVCpu.
2941 */
2942#ifndef IEM_WITH_SETJMP
2943# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2944 do \
2945 { \
2946 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2947 if (rcStrict2 != VINF_SUCCESS) \
2948 return rcStrict2; \
2949 } while (0)
2950#else
2951# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2952#endif
2953
2954
2955#ifndef IEM_WITH_SETJMP
2956/**
2957 * Fetches the next signed double word from the opcode stream.
2958 *
2959 * @returns Strict VBox status code.
2960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2961 * @param pi32 Where to return the signed double word.
2962 */
2963DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2964{
2965 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2966}
2967#endif
2968
2969/**
2970 * Fetches the next signed double word from the opcode stream, returning
2971 * automatically on failure.
2972 *
2973 * @param a_pi32 Where to return the signed double word.
2974 * @remark Implicitly references pVCpu.
2975 */
2976#ifndef IEM_WITH_SETJMP
2977# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2978 do \
2979 { \
2980 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2981 if (rcStrict2 != VINF_SUCCESS) \
2982 return rcStrict2; \
2983 } while (0)
2984#else
2985# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2986#endif
2987
2988#ifndef IEM_WITH_SETJMP
2989
2990/**
2991 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2992 *
2993 * @returns Strict VBox status code.
2994 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2995 * @param pu64 Where to return the opcode qword.
2996 */
2997DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2998{
2999 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3000 if (rcStrict == VINF_SUCCESS)
3001 {
3002 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3003 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3004 pVCpu->iem.s.abOpcode[offOpcode + 1],
3005 pVCpu->iem.s.abOpcode[offOpcode + 2],
3006 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3007 pVCpu->iem.s.offOpcode = offOpcode + 4;
3008 }
3009 else
3010 *pu64 = 0;
3011 return rcStrict;
3012}
3013
3014
3015/**
3016 * Fetches the next opcode dword, sign extending it into a quad word.
3017 *
3018 * @returns Strict VBox status code.
3019 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3020 * @param pu64 Where to return the opcode quad word.
3021 */
3022DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3023{
3024 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3025 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3026 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3027
3028 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3029 pVCpu->iem.s.abOpcode[offOpcode + 1],
3030 pVCpu->iem.s.abOpcode[offOpcode + 2],
3031 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3032 *pu64 = i32;
3033 pVCpu->iem.s.offOpcode = offOpcode + 4;
3034 return VINF_SUCCESS;
3035}
3036
3037#endif /* !IEM_WITH_SETJMP */
3038
3039
3040/**
3041 * Fetches the next opcode double word and sign extends it to a quad word,
3042 * returns automatically on failure.
3043 *
3044 * @param a_pu64 Where to return the opcode quad word.
3045 * @remark Implicitly references pVCpu.
3046 */
3047#ifndef IEM_WITH_SETJMP
3048# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3049 do \
3050 { \
3051 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3052 if (rcStrict2 != VINF_SUCCESS) \
3053 return rcStrict2; \
3054 } while (0)
3055#else
3056# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3057#endif
3058
3059#ifndef IEM_WITH_SETJMP
3060
3061/**
3062 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3063 *
3064 * @returns Strict VBox status code.
3065 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3066 * @param pu64 Where to return the opcode qword.
3067 */
3068DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3069{
3070 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3071 if (rcStrict == VINF_SUCCESS)
3072 {
3073 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3074# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3075 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3076# else
3077 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3078 pVCpu->iem.s.abOpcode[offOpcode + 1],
3079 pVCpu->iem.s.abOpcode[offOpcode + 2],
3080 pVCpu->iem.s.abOpcode[offOpcode + 3],
3081 pVCpu->iem.s.abOpcode[offOpcode + 4],
3082 pVCpu->iem.s.abOpcode[offOpcode + 5],
3083 pVCpu->iem.s.abOpcode[offOpcode + 6],
3084 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3085# endif
3086 pVCpu->iem.s.offOpcode = offOpcode + 8;
3087 }
3088 else
3089 *pu64 = 0;
3090 return rcStrict;
3091}
3092
3093
3094/**
3095 * Fetches the next opcode qword.
3096 *
3097 * @returns Strict VBox status code.
3098 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3099 * @param pu64 Where to return the opcode qword.
3100 */
3101DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3102{
3103 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3104 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3105 {
3106# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3107 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3108# else
3109 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3110 pVCpu->iem.s.abOpcode[offOpcode + 1],
3111 pVCpu->iem.s.abOpcode[offOpcode + 2],
3112 pVCpu->iem.s.abOpcode[offOpcode + 3],
3113 pVCpu->iem.s.abOpcode[offOpcode + 4],
3114 pVCpu->iem.s.abOpcode[offOpcode + 5],
3115 pVCpu->iem.s.abOpcode[offOpcode + 6],
3116 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3117# endif
3118 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3119 return VINF_SUCCESS;
3120 }
3121 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3122}
3123
3124#else /* IEM_WITH_SETJMP */
3125
3126/**
3127 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3128 *
3129 * @returns The opcode qword.
3130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3131 */
3132DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3133{
3134# ifdef IEM_WITH_CODE_TLB
3135 uint64_t u64;
3136 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3137 return u64;
3138# else
3139 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3140 if (rcStrict == VINF_SUCCESS)
3141 {
3142 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3143 pVCpu->iem.s.offOpcode = offOpcode + 8;
3144# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3145 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3146# else
3147 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3148 pVCpu->iem.s.abOpcode[offOpcode + 1],
3149 pVCpu->iem.s.abOpcode[offOpcode + 2],
3150 pVCpu->iem.s.abOpcode[offOpcode + 3],
3151 pVCpu->iem.s.abOpcode[offOpcode + 4],
3152 pVCpu->iem.s.abOpcode[offOpcode + 5],
3153 pVCpu->iem.s.abOpcode[offOpcode + 6],
3154 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3155# endif
3156 }
3157 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3158# endif
3159}
3160
3161
3162/**
3163 * Fetches the next opcode qword, longjmp on error.
3164 *
3165 * @returns The opcode qword.
3166 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3167 */
3168DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3169{
3170# ifdef IEM_WITH_CODE_TLB
3171 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3172 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3173 if (RT_LIKELY( pbBuf != NULL
3174 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3175 {
3176 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3177# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3178 return *(uint64_t const *)&pbBuf[offBuf];
3179# else
3180 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3181 pbBuf[offBuf + 1],
3182 pbBuf[offBuf + 2],
3183 pbBuf[offBuf + 3],
3184 pbBuf[offBuf + 4],
3185 pbBuf[offBuf + 5],
3186 pbBuf[offBuf + 6],
3187 pbBuf[offBuf + 7]);
3188# endif
3189 }
3190# else
3191 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3192 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3193 {
3194 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3195# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3196 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3197# else
3198 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3199 pVCpu->iem.s.abOpcode[offOpcode + 1],
3200 pVCpu->iem.s.abOpcode[offOpcode + 2],
3201 pVCpu->iem.s.abOpcode[offOpcode + 3],
3202 pVCpu->iem.s.abOpcode[offOpcode + 4],
3203 pVCpu->iem.s.abOpcode[offOpcode + 5],
3204 pVCpu->iem.s.abOpcode[offOpcode + 6],
3205 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3206# endif
3207 }
3208# endif
3209 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3210}
3211
3212#endif /* IEM_WITH_SETJMP */
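/* Note: opcode bytes are fetched in x86 little-endian order and RT_MAKE_U64_FROM_U8
 * takes the least significant byte first, so both code paths above produce the same
 * value; e.g. the byte sequence 01 02 03 04 05 06 07 08 yields 0x0807060504030201,
 * which is also what the IEM_USE_UNALIGNED_DATA_ACCESS direct load returns on a
 * little-endian host. */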
3213
3214/**
3215 * Fetches the next opcode quad word, returns automatically on failure.
3216 *
3217 * @param a_pu64 Where to return the opcode quad word.
3218 * @remark Implicitly references pVCpu.
3219 */
3220#ifndef IEM_WITH_SETJMP
3221# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3222 do \
3223 { \
3224 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3225 if (rcStrict2 != VINF_SUCCESS) \
3226 return rcStrict2; \
3227 } while (0)
3228#else
3229# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3230#endif
3231
3232
3233/** @name Misc Worker Functions.
3234 * @{
3235 */
3236
3237/**
3238 * Gets the exception class for the specified exception vector.
3239 *
3240 * @returns The class of the specified exception.
3241 * @param uVector The exception vector.
3242 */
3243IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3244{
3245 Assert(uVector <= X86_XCPT_LAST);
3246 switch (uVector)
3247 {
3248 case X86_XCPT_DE:
3249 case X86_XCPT_TS:
3250 case X86_XCPT_NP:
3251 case X86_XCPT_SS:
3252 case X86_XCPT_GP:
3253 case X86_XCPT_SX: /* AMD only */
3254 return IEMXCPTCLASS_CONTRIBUTORY;
3255
3256 case X86_XCPT_PF:
3257 case X86_XCPT_VE: /* Intel only */
3258 return IEMXCPTCLASS_PAGE_FAULT;
3259
3260 case X86_XCPT_DF:
3261 return IEMXCPTCLASS_DOUBLE_FAULT;
3262 }
3263 return IEMXCPTCLASS_BENIGN;
3264}
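/* Note: these classes drive the escalation rules in IEMEvaluateRecursiveXcpt below:
 * benign combinations are delivered normally, a contributory exception raised while
 * delivering another contributory one and a page fault or contributory exception
 * raised while delivering a page fault escalate to #DF, and a fault raised while
 * delivering #DF escalates to a triple fault. */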
3265
3266
3267/**
3268 * Evaluates how to handle an exception caused during delivery of another event
3269 * (exception / interrupt).
3270 *
3271 * @returns How to handle the recursive exception.
3272 * @param pVCpu The cross context virtual CPU structure of the
3273 * calling thread.
3274 * @param fPrevFlags The flags of the previous event.
3275 * @param uPrevVector The vector of the previous event.
3276 * @param fCurFlags The flags of the current exception.
3277 * @param uCurVector The vector of the current exception.
3278 * @param pfXcptRaiseInfo Where to store additional information about the
3279 * exception condition. Optional.
3280 */
3281VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3282 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3283{
3284 /*
3285     * Only CPU exceptions can be raised while delivering other events; software interrupt
3286 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3287 */
3288 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3289 Assert(pVCpu); RT_NOREF(pVCpu);
3290 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3291
3292 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3293 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3294 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3295 {
3296 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3297 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3298 {
3299 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3300 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3301 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3302 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3303 {
3304 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3305 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3306 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3307 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3308 uCurVector, IEM_GET_CTX(pVCpu)->cr2));
3309 }
3310 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3311 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3312 {
3313 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3314 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3315 }
3316 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3317 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3318 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3319 {
3320 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3321 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3322 }
3323 }
3324 else
3325 {
3326 if (uPrevVector == X86_XCPT_NMI)
3327 {
3328 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3329 if (uCurVector == X86_XCPT_PF)
3330 {
3331 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3332 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3333 }
3334 }
3335 else if ( uPrevVector == X86_XCPT_AC
3336 && uCurVector == X86_XCPT_AC)
3337 {
3338 enmRaise = IEMXCPTRAISE_CPU_HANG;
3339 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3340 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3341 }
3342 }
3343 }
3344 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3345 {
3346 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3347 if (uCurVector == X86_XCPT_PF)
3348 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3349 }
3350 else
3351 {
3352 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3353 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3354 }
3355
3356 if (pfXcptRaiseInfo)
3357 *pfXcptRaiseInfo = fRaiseInfo;
3358 return enmRaise;
3359}
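/* Illustrative call sketch (hypothetical values): a page fault raised while delivering
 * another page fault is folded into a double fault:
 *
 *     IEMXCPTRAISEINFO fInfo;
 *     IEMXCPTRAISE enmWhatToDo = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                         IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                         IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                         &fInfo);
 *     // enmWhatToDo == IEMXCPTRAISE_DOUBLE_FAULT; fInfo has IEMXCPTRAISEINFO_PF_PF set.
 */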
3360
3361
3362/**
3363 * Enters the CPU shutdown state initiated by a triple fault or other
3364 * unrecoverable conditions.
3365 *
3366 * @returns Strict VBox status code.
3367 * @param pVCpu The cross context virtual CPU structure of the
3368 * calling thread.
3369 */
3370IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3371{
3372 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3373 {
3374 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3375 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3376 }
3377
3378 RT_NOREF(pVCpu);
3379 return VINF_EM_TRIPLE_FAULT;
3380}
3381
3382
3383/**
3384 * Validates a new SS segment.
3385 *
3386 * @returns VBox strict status code.
3387 * @param pVCpu The cross context virtual CPU structure of the
3388 * calling thread.
3389 * @param pCtx The CPU context.
3390 * @param NewSS The new SS selector.
3391 * @param uCpl The CPL to load the stack for.
3392 * @param pDesc Where to return the descriptor.
3393 */
3394IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3395{
3396 NOREF(pCtx);
3397
3398 /* Null selectors are not allowed (we're not called for dispatching
3399 interrupts with SS=0 in long mode). */
3400 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3401 {
3402         Log(("iemMiscValidateNewSS: %#x - null selector -> #TS(0)\n", NewSS));
3403 return iemRaiseTaskSwitchFault0(pVCpu);
3404 }
3405
3406 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3407 if ((NewSS & X86_SEL_RPL) != uCpl)
3408 {
3409         Log(("iemMiscValidateNewSS: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
3410 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3411 }
3412
3413 /*
3414 * Read the descriptor.
3415 */
3416 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3417 if (rcStrict != VINF_SUCCESS)
3418 return rcStrict;
3419
3420 /*
3421 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3422 */
3423 if (!pDesc->Legacy.Gen.u1DescType)
3424 {
3425         Log(("iemMiscValidateNewSS: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3426 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3427 }
3428
3429 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3430 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3431 {
3432         Log(("iemMiscValidateNewSS: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3433 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3434 }
3435 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3436 {
3437         Log(("iemMiscValidateNewSS: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3438 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3439 }
3440
3441 /* Is it there? */
3442 /** @todo testcase: Is this checked before the canonical / limit check below? */
3443 if (!pDesc->Legacy.Gen.u1Present)
3444 {
3445         Log(("iemMiscValidateNewSS: %#x - segment not present -> #NP\n", NewSS));
3446 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3447 }
3448
3449 return VINF_SUCCESS;
3450}
3451
3452
3453/**
3454 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3455 * not.
3456 *
3457 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3458 * @param a_pCtx The CPU context.
3459 */
3460#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3461# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3462 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3463 ? (a_pCtx)->eflags.u \
3464 : CPUMRawGetEFlags(a_pVCpu) )
3465#else
3466# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3467 ( (a_pCtx)->eflags.u )
3468#endif
3469
3470/**
3471 * Updates the EFLAGS in the correct manner wrt. PATM.
3472 *
3473 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3474 * @param a_pCtx The CPU context.
3475 * @param a_fEfl The new EFLAGS.
3476 */
3477#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3478# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3479 do { \
3480 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3481 (a_pCtx)->eflags.u = (a_fEfl); \
3482 else \
3483 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3484 } while (0)
3485#else
3486# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3487 do { \
3488 (a_pCtx)->eflags.u = (a_fEfl); \
3489 } while (0)
3490#endif
3491
3492
3493/** @} */
3494
3495/** @name Raising Exceptions.
3496 *
3497 * @{
3498 */
3499
3500
3501/**
3502 * Loads the specified stack far pointer from the TSS.
3503 *
3504 * @returns VBox strict status code.
3505 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3506 * @param pCtx The CPU context.
3507 * @param uCpl The CPL to load the stack for.
3508 * @param pSelSS Where to return the new stack segment.
3509 * @param puEsp Where to return the new stack pointer.
3510 */
3511IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3512 PRTSEL pSelSS, uint32_t *puEsp)
3513{
3514 VBOXSTRICTRC rcStrict;
3515 Assert(uCpl < 4);
3516
3517 switch (pCtx->tr.Attr.n.u4Type)
3518 {
3519 /*
3520 * 16-bit TSS (X86TSS16).
3521 */
3522 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3523 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3524 {
3525 uint32_t off = uCpl * 4 + 2;
3526 if (off + 4 <= pCtx->tr.u32Limit)
3527 {
3528 /** @todo check actual access pattern here. */
3529 uint32_t u32Tmp = 0; /* gcc maybe... */
3530 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3531 if (rcStrict == VINF_SUCCESS)
3532 {
3533 *puEsp = RT_LOWORD(u32Tmp);
3534 *pSelSS = RT_HIWORD(u32Tmp);
3535 return VINF_SUCCESS;
3536 }
3537 }
3538 else
3539 {
3540 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3541 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3542 }
3543 break;
3544 }
3545
3546 /*
3547 * 32-bit TSS (X86TSS32).
3548 */
3549 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3550 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3551 {
3552 uint32_t off = uCpl * 8 + 4;
3553 if (off + 7 <= pCtx->tr.u32Limit)
3554 {
3555            /** @todo check actual access pattern here. */
3556 uint64_t u64Tmp;
3557 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3558 if (rcStrict == VINF_SUCCESS)
3559 {
3560 *puEsp = u64Tmp & UINT32_MAX;
3561 *pSelSS = (RTSEL)(u64Tmp >> 32);
3562 return VINF_SUCCESS;
3563 }
3564 }
3565 else
3566 {
3567                 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3568 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3569 }
3570 break;
3571 }
3572
3573 default:
3574 AssertFailed();
3575 rcStrict = VERR_IEM_IPE_4;
3576 break;
3577 }
3578
3579 *puEsp = 0; /* make gcc happy */
3580 *pSelSS = 0; /* make gcc happy */
3581 return rcStrict;
3582}
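/* Worked example (assuming the standard X86TSS16/X86TSS32 layouts): for uCpl == 1 the
 * 16-bit path reads 4 bytes at offset 1 * 4 + 2 = 6, i.e. the sp1:ss1 pair, while the
 * 32-bit path reads 8 bytes at offset 1 * 8 + 4 = 12, i.e. the esp1:ss1 pair; in both
 * cases the stack pointer sits in the low half and the selector in the high half,
 * matching the RT_LOWORD/RT_HIWORD and shift splits above. */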
3583
3584
3585/**
3586 * Loads the specified stack pointer from the 64-bit TSS.
3587 *
3588 * @returns VBox strict status code.
3589 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3590 * @param pCtx The CPU context.
3591 * @param uCpl The CPL to load the stack for.
3592 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3593 * @param puRsp Where to return the new stack pointer.
3594 */
3595IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3596{
3597 Assert(uCpl < 4);
3598 Assert(uIst < 8);
3599 *puRsp = 0; /* make gcc happy */
3600
3601 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3602
3603 uint32_t off;
3604 if (uIst)
3605 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3606 else
3607 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3608 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3609 {
3610 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3611 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3612 }
3613
3614 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3615}
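/* Worked example (assuming the X86TSS64 layout from the x86 headers): uIst == 0 selects
 * the rsp0/rsp1/rsp2 slot at offset RT_OFFSETOF(X86TSS64, rsp0) + uCpl * 8, while e.g.
 * uIst == 2 reads 8 bytes at RT_OFFSETOF(X86TSS64, ist1) + 8, i.e. the IST2 entry. */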
3616
3617
3618/**
3619 * Adjusts the CPU state according to the exception being raised.
3620 *
3621 * @param pCtx The CPU context.
3622 * @param u8Vector The exception that has been raised.
3623 */
3624DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3625{
3626 switch (u8Vector)
3627 {
3628 case X86_XCPT_DB:
3629 pCtx->dr[7] &= ~X86_DR7_GD;
3630 break;
3631 /** @todo Read the AMD and Intel exception reference... */
3632 }
3633}
3634
3635
3636/**
3637 * Implements exceptions and interrupts for real mode.
3638 *
3639 * @returns VBox strict status code.
3640 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3641 * @param pCtx The CPU context.
3642 * @param cbInstr The number of bytes to offset rIP by in the return
3643 * address.
3644 * @param u8Vector The interrupt / exception vector number.
3645 * @param fFlags The flags.
3646 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3647 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3648 */
3649IEM_STATIC VBOXSTRICTRC
3650iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3651 PCPUMCTX pCtx,
3652 uint8_t cbInstr,
3653 uint8_t u8Vector,
3654 uint32_t fFlags,
3655 uint16_t uErr,
3656 uint64_t uCr2)
3657{
3658 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3659 NOREF(uErr); NOREF(uCr2);
3660
3661 /*
3662 * Read the IDT entry.
3663 */
3664 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3665 {
3666 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3667 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3668 }
3669 RTFAR16 Idte;
3670 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3671 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3672 return rcStrict;
3673
3674 /*
3675 * Push the stack frame.
3676 */
3677 uint16_t *pu16Frame;
3678 uint64_t uNewRsp;
3679 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3680 if (rcStrict != VINF_SUCCESS)
3681 return rcStrict;
3682
3683 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3684#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3685 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3686 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3687 fEfl |= UINT16_C(0xf000);
3688#endif
3689 pu16Frame[2] = (uint16_t)fEfl;
3690 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3691 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3692 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3693 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3694 return rcStrict;
3695
3696 /*
3697 * Load the vector address into cs:ip and make exception specific state
3698 * adjustments.
3699 */
3700 pCtx->cs.Sel = Idte.sel;
3701 pCtx->cs.ValidSel = Idte.sel;
3702 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3703 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3704 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3705 pCtx->rip = Idte.off;
3706 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3707 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3708
3709 /** @todo do we actually do this in real mode? */
3710 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3711 iemRaiseXcptAdjustState(pCtx, u8Vector);
3712
3713 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3714}
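/* Worked example (illustrative): in real mode every IDT entry is a 4 byte offset:selector
 * pair, so e.g. vector 0x10 is read from idtr.pIdt + 0x40 and execution continues at that
 * selector:offset once the 6 byte FLAGS/CS/IP frame built above has been pushed. */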
3715
3716
3717/**
3718 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3719 *
3720 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3721 * @param pSReg Pointer to the segment register.
3722 */
3723IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3724{
3725 pSReg->Sel = 0;
3726 pSReg->ValidSel = 0;
3727 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3728 {
3729         /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
3730 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3731 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3732 }
3733 else
3734 {
3735 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3736 /** @todo check this on AMD-V */
3737 pSReg->u64Base = 0;
3738 pSReg->u32Limit = 0;
3739 }
3740}
3741
3742
3743/**
3744 * Loads a segment selector during a task switch in V8086 mode.
3745 *
3746 * @param pSReg Pointer to the segment register.
3747 * @param uSel The selector value to load.
3748 */
3749IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3750{
3751 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3752 pSReg->Sel = uSel;
3753 pSReg->ValidSel = uSel;
3754 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3755 pSReg->u64Base = uSel << 4;
3756 pSReg->u32Limit = 0xffff;
3757 pSReg->Attr.u = 0xf3;
3758}
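/* Note: the attribute value 0xf3 used above decodes to a present, DPL 3, accessed
 * read/write data segment, i.e. the fixed segment attributes V8086 mode gives the
 * guest (cf. the Intel spec reference above). */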
3759
3760
3761/**
3762 * Loads a NULL data selector into a selector register, both the hidden and
3763 * visible parts, in protected mode.
3764 *
3765 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3766 * @param pSReg Pointer to the segment register.
3767 * @param uRpl The RPL.
3768 */
3769IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3770{
3771     /** @todo Testcase: write a testcase checking what happens when loading a NULL
3772 * data selector in protected mode. */
3773 pSReg->Sel = uRpl;
3774 pSReg->ValidSel = uRpl;
3775 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3776 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3777 {
3778 /* VT-x (Intel 3960x) observed doing something like this. */
3779 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3780 pSReg->u32Limit = UINT32_MAX;
3781 pSReg->u64Base = 0;
3782 }
3783 else
3784 {
3785 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3786 pSReg->u32Limit = 0;
3787 pSReg->u64Base = 0;
3788 }
3789}
3790
3791
3792/**
3793 * Loads a segment selector during a task switch in protected mode.
3794 *
3795 * In this task switch scenario, we would throw \#TS exceptions rather than
3796 * \#GPs.
3797 *
3798 * @returns VBox strict status code.
3799 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3800 * @param pSReg Pointer to the segment register.
3801 * @param uSel The new selector value.
3802 *
3803 * @remarks This does _not_ handle CS or SS.
3804 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3805 */
3806IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3807{
3808 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3809
3810 /* Null data selector. */
3811 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3812 {
3813 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3814 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3815 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3816 return VINF_SUCCESS;
3817 }
3818
3819 /* Fetch the descriptor. */
3820 IEMSELDESC Desc;
3821 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3822 if (rcStrict != VINF_SUCCESS)
3823 {
3824 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3825 VBOXSTRICTRC_VAL(rcStrict)));
3826 return rcStrict;
3827 }
3828
3829 /* Must be a data segment or readable code segment. */
3830 if ( !Desc.Legacy.Gen.u1DescType
3831 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3832 {
3833 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3834 Desc.Legacy.Gen.u4Type));
3835 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3836 }
3837
3838 /* Check privileges for data segments and non-conforming code segments. */
3839 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3840 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3841 {
3842 /* The RPL and the new CPL must be less than or equal to the DPL. */
3843 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3844 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3845 {
3846 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3847 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3848 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3849 }
3850 }
3851
3852 /* Is it there? */
3853 if (!Desc.Legacy.Gen.u1Present)
3854 {
3855 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3856 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3857 }
3858
3859 /* The base and limit. */
3860 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3861 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3862
3863 /*
3864 * Ok, everything checked out fine. Now set the accessed bit before
3865 * committing the result into the registers.
3866 */
3867 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3868 {
3869 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3870 if (rcStrict != VINF_SUCCESS)
3871 return rcStrict;
3872 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3873 }
3874
3875 /* Commit */
3876 pSReg->Sel = uSel;
3877 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3878 pSReg->u32Limit = cbLimit;
3879 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3880 pSReg->ValidSel = uSel;
3881 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3882 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3883 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3884
3885 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3886 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3887 return VINF_SUCCESS;
3888}
3889
3890
3891/**
3892 * Performs a task switch.
3893 *
3894 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3895 * caller is responsible for performing the necessary checks (like DPL, TSS
3896 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3897 * reference for JMP, CALL, IRET.
3898 *
3899 * If the task switch is due to a software interrupt or hardware exception,
3900 * the caller is responsible for validating the TSS selector and descriptor. See
3901 * Intel Instruction reference for INT n.
3902 *
3903 * @returns VBox strict status code.
3904 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3905 * @param pCtx The CPU context.
3906 * @param enmTaskSwitch What caused this task switch.
3907 * @param uNextEip The EIP effective after the task switch.
3908 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3909 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3910 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3911 * @param SelTSS The TSS selector of the new task.
3912 * @param pNewDescTSS Pointer to the new TSS descriptor.
3913 */
3914IEM_STATIC VBOXSTRICTRC
3915iemTaskSwitch(PVMCPU pVCpu,
3916 PCPUMCTX pCtx,
3917 IEMTASKSWITCH enmTaskSwitch,
3918 uint32_t uNextEip,
3919 uint32_t fFlags,
3920 uint16_t uErr,
3921 uint64_t uCr2,
3922 RTSEL SelTSS,
3923 PIEMSELDESC pNewDescTSS)
3924{
3925 Assert(!IEM_IS_REAL_MODE(pVCpu));
3926 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3927
3928 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3929 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3930 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3931 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3932 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3933
3934 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3935 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3936
3937 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3938 fIsNewTSS386, pCtx->eip, uNextEip));
3939
3940 /* Update CR2 in case it's a page-fault. */
3941 /** @todo This should probably be done much earlier in IEM/PGM. See
3942 * @bugref{5653#c49}. */
3943 if (fFlags & IEM_XCPT_FLAGS_CR2)
3944 pCtx->cr2 = uCr2;
3945
3946 /*
3947 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3948 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3949 */
3950 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3951 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3952 if (uNewTSSLimit < uNewTSSLimitMin)
3953 {
3954 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3955 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3956 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3957 }
3958
3959 /*
3960 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
3961 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
3962 */
3963 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
3964 {
3965 uint32_t const uExitInfo1 = SelTSS;
3966 uint32_t uExitInfo2 = uErr;
3967 switch (enmTaskSwitch)
3968 {
3969 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
3970 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
3971 default: break;
3972 }
3973 if (fFlags & IEM_XCPT_FLAGS_ERR)
3974 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
3975 if (pCtx->eflags.Bits.u1RF)
3976 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
3977
3978 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
3979 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
3980 RT_NOREF2(uExitInfo1, uExitInfo2);
3981 }
3982 /** @todo Nested-VMX task-switch intercept. */
3983
3984 /*
3985     * Check the current TSS limit. The last bytes written to the current TSS during the
3986     * task switch are 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3987 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3988 *
3989     * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3990 * end up with smaller than "legal" TSS limits.
3991 */
3992 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3993 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3994 if (uCurTSSLimit < uCurTSSLimitMin)
3995 {
3996 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3997 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3998 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3999 }
4000
4001 /*
4002 * Verify that the new TSS can be accessed and map it. Map only the required contents
4003 * and not the entire TSS.
4004 */
4005 void *pvNewTSS;
4006 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4007 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4008 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4009 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4010 * not perform correct translation if this happens. See Intel spec. 7.2.1
4011 * "Task-State Segment" */
4012 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4013 if (rcStrict != VINF_SUCCESS)
4014 {
4015 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4016 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4017 return rcStrict;
4018 }
4019
4020 /*
4021 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4022 */
4023 uint32_t u32EFlags = pCtx->eflags.u32;
4024 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4025 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4026 {
4027 PX86DESC pDescCurTSS;
4028 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4029 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4030 if (rcStrict != VINF_SUCCESS)
4031 {
4032             Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4033 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4034 return rcStrict;
4035 }
4036
4037 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4038 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4039 if (rcStrict != VINF_SUCCESS)
4040 {
4041             Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4042 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4043 return rcStrict;
4044 }
4045
4046 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4047 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4048 {
4049 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4050 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4051 u32EFlags &= ~X86_EFL_NT;
4052 }
4053 }
4054
4055 /*
4056 * Save the CPU state into the current TSS.
4057 */
4058 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
4059 if (GCPtrNewTSS == GCPtrCurTSS)
4060 {
4061 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4062 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4063 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
4064 }
4065 if (fIsNewTSS386)
4066 {
4067 /*
4068 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4069 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4070 */
4071 void *pvCurTSS32;
4072 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
4073 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
4074 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4075 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4076 if (rcStrict != VINF_SUCCESS)
4077 {
4078 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4079 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4080 return rcStrict;
4081 }
4082
4083         /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4084 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4085 pCurTSS32->eip = uNextEip;
4086 pCurTSS32->eflags = u32EFlags;
4087 pCurTSS32->eax = pCtx->eax;
4088 pCurTSS32->ecx = pCtx->ecx;
4089 pCurTSS32->edx = pCtx->edx;
4090 pCurTSS32->ebx = pCtx->ebx;
4091 pCurTSS32->esp = pCtx->esp;
4092 pCurTSS32->ebp = pCtx->ebp;
4093 pCurTSS32->esi = pCtx->esi;
4094 pCurTSS32->edi = pCtx->edi;
4095 pCurTSS32->es = pCtx->es.Sel;
4096 pCurTSS32->cs = pCtx->cs.Sel;
4097 pCurTSS32->ss = pCtx->ss.Sel;
4098 pCurTSS32->ds = pCtx->ds.Sel;
4099 pCurTSS32->fs = pCtx->fs.Sel;
4100 pCurTSS32->gs = pCtx->gs.Sel;
4101
4102 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4103 if (rcStrict != VINF_SUCCESS)
4104 {
4105 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4106 VBOXSTRICTRC_VAL(rcStrict)));
4107 return rcStrict;
4108 }
4109 }
4110 else
4111 {
4112 /*
4113 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4114 */
4115 void *pvCurTSS16;
4116 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
4117 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
4118 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4119 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4120 if (rcStrict != VINF_SUCCESS)
4121 {
4122 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4123 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4124 return rcStrict;
4125 }
4126
4127         /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4128 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4129 pCurTSS16->ip = uNextEip;
4130 pCurTSS16->flags = u32EFlags;
4131 pCurTSS16->ax = pCtx->ax;
4132 pCurTSS16->cx = pCtx->cx;
4133 pCurTSS16->dx = pCtx->dx;
4134 pCurTSS16->bx = pCtx->bx;
4135 pCurTSS16->sp = pCtx->sp;
4136 pCurTSS16->bp = pCtx->bp;
4137 pCurTSS16->si = pCtx->si;
4138 pCurTSS16->di = pCtx->di;
4139 pCurTSS16->es = pCtx->es.Sel;
4140 pCurTSS16->cs = pCtx->cs.Sel;
4141 pCurTSS16->ss = pCtx->ss.Sel;
4142 pCurTSS16->ds = pCtx->ds.Sel;
4143
4144 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4145 if (rcStrict != VINF_SUCCESS)
4146 {
4147 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4148 VBOXSTRICTRC_VAL(rcStrict)));
4149 return rcStrict;
4150 }
4151 }
4152
4153 /*
4154 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4155 */
4156 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4157 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4158 {
4159 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4160 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4161 pNewTSS->selPrev = pCtx->tr.Sel;
4162 }
4163
4164 /*
4165 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4166     * so it's done further below with error handling (e.g. CR3 changes will go through PGM).
4167 */
4168 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4169 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4170 bool fNewDebugTrap;
4171 if (fIsNewTSS386)
4172 {
4173 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4174 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4175 uNewEip = pNewTSS32->eip;
4176 uNewEflags = pNewTSS32->eflags;
4177 uNewEax = pNewTSS32->eax;
4178 uNewEcx = pNewTSS32->ecx;
4179 uNewEdx = pNewTSS32->edx;
4180 uNewEbx = pNewTSS32->ebx;
4181 uNewEsp = pNewTSS32->esp;
4182 uNewEbp = pNewTSS32->ebp;
4183 uNewEsi = pNewTSS32->esi;
4184 uNewEdi = pNewTSS32->edi;
4185 uNewES = pNewTSS32->es;
4186 uNewCS = pNewTSS32->cs;
4187 uNewSS = pNewTSS32->ss;
4188 uNewDS = pNewTSS32->ds;
4189 uNewFS = pNewTSS32->fs;
4190 uNewGS = pNewTSS32->gs;
4191 uNewLdt = pNewTSS32->selLdt;
4192 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4193 }
4194 else
4195 {
4196 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4197 uNewCr3 = 0;
4198 uNewEip = pNewTSS16->ip;
4199 uNewEflags = pNewTSS16->flags;
4200 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4201 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4202 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4203 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4204 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4205 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4206 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4207 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4208 uNewES = pNewTSS16->es;
4209 uNewCS = pNewTSS16->cs;
4210 uNewSS = pNewTSS16->ss;
4211 uNewDS = pNewTSS16->ds;
4212 uNewFS = 0;
4213 uNewGS = 0;
4214 uNewLdt = pNewTSS16->selLdt;
4215 fNewDebugTrap = false;
4216 }
4217
4218 if (GCPtrNewTSS == GCPtrCurTSS)
4219 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4220 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4221
4222 /*
4223 * We're done accessing the new TSS.
4224 */
4225 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4226 if (rcStrict != VINF_SUCCESS)
4227 {
4228 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4229 return rcStrict;
4230 }
4231
4232 /*
4233 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4234 */
4235 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4236 {
4237 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4238 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4239 if (rcStrict != VINF_SUCCESS)
4240 {
4241 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4242 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4243 return rcStrict;
4244 }
4245
4246 /* Check that the descriptor indicates the new TSS is available (not busy). */
4247 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4248 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4249 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4250
4251 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4252 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4253 if (rcStrict != VINF_SUCCESS)
4254 {
4255 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4256 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4257 return rcStrict;
4258 }
4259 }
4260
4261 /*
4262 * From this point on, we're technically in the new task. We will defer exceptions
4263 * until the completion of the task switch but before executing any instructions in the new task.
4264 */
4265 pCtx->tr.Sel = SelTSS;
4266 pCtx->tr.ValidSel = SelTSS;
4267 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
4268 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4269 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4270 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4271 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4272
4273 /* Set the busy bit in TR. */
4274 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4275 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4276 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4277 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4278 {
4279 uNewEflags |= X86_EFL_NT;
4280 }
4281
4282 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4283 pCtx->cr0 |= X86_CR0_TS;
4284 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4285
4286 pCtx->eip = uNewEip;
4287 pCtx->eax = uNewEax;
4288 pCtx->ecx = uNewEcx;
4289 pCtx->edx = uNewEdx;
4290 pCtx->ebx = uNewEbx;
4291 pCtx->esp = uNewEsp;
4292 pCtx->ebp = uNewEbp;
4293 pCtx->esi = uNewEsi;
4294 pCtx->edi = uNewEdi;
4295
4296 uNewEflags &= X86_EFL_LIVE_MASK;
4297 uNewEflags |= X86_EFL_RA1_MASK;
4298 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
4299
4300 /*
4301 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4302 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4303 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4304 */
4305 pCtx->es.Sel = uNewES;
4306 pCtx->es.Attr.u &= ~X86DESCATTR_P;
4307
4308 pCtx->cs.Sel = uNewCS;
4309 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
4310
4311 pCtx->ss.Sel = uNewSS;
4312 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
4313
4314 pCtx->ds.Sel = uNewDS;
4315 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
4316
4317 pCtx->fs.Sel = uNewFS;
4318 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
4319
4320 pCtx->gs.Sel = uNewGS;
4321 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
4322 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4323
4324 pCtx->ldtr.Sel = uNewLdt;
4325 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4326 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
4327 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4328
4329 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4330 {
4331 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
4332 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
4333 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
4334 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
4335 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
4336 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
4337 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4338 }
4339
4340 /*
4341 * Switch CR3 for the new task.
4342 */
4343 if ( fIsNewTSS386
4344 && (pCtx->cr0 & X86_CR0_PG))
4345 {
4346 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4347 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4348 {
4349 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4350 AssertRCSuccessReturn(rc, rc);
4351 }
4352 else
4353 pCtx->cr3 = uNewCr3;
4354
4355 /* Inform PGM. */
4356 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4357 {
4358 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4359 AssertRCReturn(rc, rc);
4360 /* ignore informational status codes */
4361 }
4362 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4363 }
4364
4365 /*
4366 * Switch LDTR for the new task.
4367 */
4368 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4369 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4370 else
4371 {
4372 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4373
4374 IEMSELDESC DescNewLdt;
4375 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4376 if (rcStrict != VINF_SUCCESS)
4377 {
4378 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4379 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4380 return rcStrict;
4381 }
4382 if ( !DescNewLdt.Legacy.Gen.u1Present
4383 || DescNewLdt.Legacy.Gen.u1DescType
4384 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4385 {
4386 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4387 uNewLdt, DescNewLdt.Legacy.u));
4388 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4389 }
4390
4391 pCtx->ldtr.ValidSel = uNewLdt;
4392 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4393 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4394 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4395 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4396 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4397 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4398 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4399 }
4400
4401 IEMSELDESC DescSS;
4402 if (IEM_IS_V86_MODE(pVCpu))
4403 {
4404 pVCpu->iem.s.uCpl = 3;
4405 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4406 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4407 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4408 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4409 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4410 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4411
4412 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4413 DescSS.Legacy.u = 0;
4414 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4415 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4416 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4417 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4418 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4419 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4420 DescSS.Legacy.Gen.u2Dpl = 3;
4421 }
4422 else
4423 {
4424 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4425
4426 /*
4427 * Load the stack segment for the new task.
4428 */
4429 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4430 {
4431 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4432 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4433 }
4434
4435 /* Fetch the descriptor. */
4436 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4437 if (rcStrict != VINF_SUCCESS)
4438 {
4439 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4440 VBOXSTRICTRC_VAL(rcStrict)));
4441 return rcStrict;
4442 }
4443
4444 /* SS must be a data segment and writable. */
4445 if ( !DescSS.Legacy.Gen.u1DescType
4446 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4447 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4448 {
4449 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4450 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4451 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4452 }
4453
4454 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4455 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4456 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4457 {
4458 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4459 uNewCpl));
4460 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4461 }
4462
4463 /* Is it there? */
4464 if (!DescSS.Legacy.Gen.u1Present)
4465 {
4466 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4467 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4468 }
4469
4470 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4471 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4472
4473 /* Set the accessed bit before committing the result into SS. */
4474 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4475 {
4476 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4477 if (rcStrict != VINF_SUCCESS)
4478 return rcStrict;
4479 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4480 }
4481
4482 /* Commit SS. */
4483 pCtx->ss.Sel = uNewSS;
4484 pCtx->ss.ValidSel = uNewSS;
4485 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4486 pCtx->ss.u32Limit = cbLimit;
4487 pCtx->ss.u64Base = u64Base;
4488 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4489 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4490
4491 /* CPL has changed, update IEM before loading rest of segments. */
4492 pVCpu->iem.s.uCpl = uNewCpl;
4493
4494 /*
4495 * Load the data segments for the new task.
4496 */
4497 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4498 if (rcStrict != VINF_SUCCESS)
4499 return rcStrict;
4500 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4501 if (rcStrict != VINF_SUCCESS)
4502 return rcStrict;
4503 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4504 if (rcStrict != VINF_SUCCESS)
4505 return rcStrict;
4506 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4507 if (rcStrict != VINF_SUCCESS)
4508 return rcStrict;
4509
4510 /*
4511 * Load the code segment for the new task.
4512 */
4513 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4514 {
4515 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4516 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4517 }
4518
4519 /* Fetch the descriptor. */
4520 IEMSELDESC DescCS;
4521 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4522 if (rcStrict != VINF_SUCCESS)
4523 {
4524 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4525 return rcStrict;
4526 }
4527
4528 /* CS must be a code segment. */
4529 if ( !DescCS.Legacy.Gen.u1DescType
4530 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4531 {
4532 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4533 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4534 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4535 }
4536
4537 /* For conforming CS, DPL must be less than or equal to the RPL. */
4538 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4539 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4540 {
4541             Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4542 DescCS.Legacy.Gen.u2Dpl));
4543 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4544 }
4545
4546 /* For non-conforming CS, DPL must match RPL. */
4547 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4548 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4549 {
4550             Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4551 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4552 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4553 }
4554
4555 /* Is it there? */
4556 if (!DescCS.Legacy.Gen.u1Present)
4557 {
4558 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4559 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4560 }
4561
4562 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4563 u64Base = X86DESC_BASE(&DescCS.Legacy);
4564
4565 /* Set the accessed bit before committing the result into CS. */
4566 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4567 {
4568 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4569 if (rcStrict != VINF_SUCCESS)
4570 return rcStrict;
4571 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4572 }
4573
4574 /* Commit CS. */
4575 pCtx->cs.Sel = uNewCS;
4576 pCtx->cs.ValidSel = uNewCS;
4577 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4578 pCtx->cs.u32Limit = cbLimit;
4579 pCtx->cs.u64Base = u64Base;
4580 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4581 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4582 }
4583
4584 /** @todo Debug trap. */
4585 if (fIsNewTSS386 && fNewDebugTrap)
4586 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4587
4588 /*
4589 * Construct the error code masks based on what caused this task switch.
4590 * See Intel Instruction reference for INT.
4591 */
4592 uint16_t uExt;
4593 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4594 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4595 {
4596 uExt = 1;
4597 }
4598 else
4599 uExt = 0;
4600
4601 /*
4602 * Push any error code on to the new stack.
4603 */
4604 if (fFlags & IEM_XCPT_FLAGS_ERR)
4605 {
4606 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4607 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4608 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4609
4610 /* Check that there is sufficient space on the stack. */
4611 /** @todo Factor out segment limit checking for normal/expand down segments
4612 * into a separate function. */
4613 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4614 {
4615 if ( pCtx->esp - 1 > cbLimitSS
4616 || pCtx->esp < cbStackFrame)
4617 {
4618 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4619 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4620 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4621 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4622 }
4623 }
4624 else
4625 {
4626 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4627 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4628 {
4629 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4630 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4631 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4632 }
4633 }
4634
4635
4636 if (fIsNewTSS386)
4637 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4638 else
4639 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4640 if (rcStrict != VINF_SUCCESS)
4641 {
4642 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4643 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4644 return rcStrict;
4645 }
4646 }
4647
4648 /* Check the new EIP against the new CS limit. */
4649 if (pCtx->eip > pCtx->cs.u32Limit)
4650 {
4651         Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4652 pCtx->eip, pCtx->cs.u32Limit));
4653 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4654 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4655 }
4656
4657 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4658 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4659}
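/* Summary of the sequence implemented above: validate the new TSS limit, honour the SVM
 * task-switch intercept, clear the busy bit of the outgoing TSS descriptor for JMP/IRET,
 * save the outgoing register state into the current TSS, link and mark the new TSS busy
 * for CALL/INT_XCPT, load the general registers and EFLAGS from the new TSS, switch CR3
 * and LDTR, reload SS/CS and the data segments (raising #TS/#NP on failures), push any
 * error code onto the new stack, and finally check the new EIP against the CS limit. */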
4660
4661
4662/**
4663 * Implements exceptions and interrupts for protected mode.
4664 *
4665 * @returns VBox strict status code.
4666 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4667 * @param pCtx The CPU context.
4668 * @param cbInstr The number of bytes to offset rIP by in the return
4669 * address.
4670 * @param u8Vector The interrupt / exception vector number.
4671 * @param fFlags The flags.
4672 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4673 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4674 */
4675IEM_STATIC VBOXSTRICTRC
4676iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4677 PCPUMCTX pCtx,
4678 uint8_t cbInstr,
4679 uint8_t u8Vector,
4680 uint32_t fFlags,
4681 uint16_t uErr,
4682 uint64_t uCr2)
4683{
4684 /*
4685 * Read the IDT entry.
4686 */
4687 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4688 {
4689 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4690 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4691 }
4692 X86DESC Idte;
4693 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4694 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4695 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4696 return rcStrict;
4697 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4698 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4699 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4700
4701 /*
4702 * Check the descriptor type, DPL and such.
4703 * ASSUMES this is done in the same order as described for call-gate calls.
4704 */
4705 if (Idte.Gate.u1DescType)
4706 {
4707 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4708 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4709 }
4710 bool fTaskGate = false;
4711 uint8_t f32BitGate = true;
4712 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4713 switch (Idte.Gate.u4Type)
4714 {
4715 case X86_SEL_TYPE_SYS_UNDEFINED:
4716 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4717 case X86_SEL_TYPE_SYS_LDT:
4718 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4719 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4720 case X86_SEL_TYPE_SYS_UNDEFINED2:
4721 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4722 case X86_SEL_TYPE_SYS_UNDEFINED3:
4723 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4724 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4725 case X86_SEL_TYPE_SYS_UNDEFINED4:
4726 {
4727 /** @todo check what actually happens when the type is wrong...
4728 * esp. call gates. */
4729 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4730 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4731 }
4732
4733 case X86_SEL_TYPE_SYS_286_INT_GATE:
4734 f32BitGate = false;
4735 RT_FALL_THRU();
4736 case X86_SEL_TYPE_SYS_386_INT_GATE:
4737 fEflToClear |= X86_EFL_IF;
4738 break;
4739
4740 case X86_SEL_TYPE_SYS_TASK_GATE:
4741 fTaskGate = true;
4742#ifndef IEM_IMPLEMENTS_TASKSWITCH
4743 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4744#endif
4745 break;
4746
4747 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4748 f32BitGate = false; RT_FALL_THRU();
4749 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4750 break;
4751
4752 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4753 }
4754
4755 /* Check DPL against CPL if applicable. */
4756 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4757 {
4758 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4759 {
4760 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4761 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4762 }
4763 }
4764
4765 /* Is it there? */
4766 if (!Idte.Gate.u1Present)
4767 {
4768 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4769 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4770 }
4771
4772 /* Is it a task-gate? */
4773 if (fTaskGate)
4774 {
4775 /*
4776 * Construct the error code masks based on what caused this task switch.
4777 * See Intel Instruction reference for INT.
4778 */
4779 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4780 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4781 RTSEL SelTSS = Idte.Gate.u16Sel;
4782
4783 /*
4784 * Fetch the TSS descriptor in the GDT.
4785 */
4786 IEMSELDESC DescTSS;
4787 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4788 if (rcStrict != VINF_SUCCESS)
4789 {
4790 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4791 VBOXSTRICTRC_VAL(rcStrict)));
4792 return rcStrict;
4793 }
4794
4795 /* The TSS descriptor must be a system segment and be available (not busy). */
4796 if ( DescTSS.Legacy.Gen.u1DescType
4797 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4798 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4799 {
4800 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4801 u8Vector, SelTSS, DescTSS.Legacy.au64));
4802 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4803 }
4804
4805 /* The TSS must be present. */
4806 if (!DescTSS.Legacy.Gen.u1Present)
4807 {
4808 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4809 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4810 }
4811
4812 /* Do the actual task switch. */
4813 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4814 }
4815
4816 /* A null CS is bad. */
4817 RTSEL NewCS = Idte.Gate.u16Sel;
4818 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4819 {
4820 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4821 return iemRaiseGeneralProtectionFault0(pVCpu);
4822 }
4823
4824 /* Fetch the descriptor for the new CS. */
4825 IEMSELDESC DescCS;
4826 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4827 if (rcStrict != VINF_SUCCESS)
4828 {
4829 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4830 return rcStrict;
4831 }
4832
4833 /* Must be a code segment. */
4834 if (!DescCS.Legacy.Gen.u1DescType)
4835 {
4836 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4837 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4838 }
4839 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4840 {
4841 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4842 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4843 }
4844
4845 /* Don't allow lowering the privilege level. */
4846 /** @todo Does the lowering of privileges apply to software interrupts
4847 * only? This has bearings on the more-privileged or
4848 * same-privilege stack behavior further down. A testcase would
4849 * be nice. */
4850 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4851 {
4852 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4853 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4854 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4855 }
4856
4857 /* Make sure the selector is present. */
4858 if (!DescCS.Legacy.Gen.u1Present)
4859 {
4860 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4861 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4862 }
4863
4864 /* Check the new EIP against the new CS limit. */
4865 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4866 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4867 ? Idte.Gate.u16OffsetLow
4868 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4869 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4870 if (uNewEip > cbLimitCS)
4871 {
4872 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4873 u8Vector, uNewEip, cbLimitCS, NewCS));
4874 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4875 }
4876 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4877
4878 /* Calc the flag image to push. */
4879 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4880 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4881 fEfl &= ~X86_EFL_RF;
4882 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4883 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4884
4885 /* From V8086 mode only go to CPL 0. */
4886 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4887 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4888 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4889 {
4890 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4891 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4892 }
4893
4894 /*
4895 * If the privilege level changes, we need to get a new stack from the TSS.
4896 * This in turns means validating the new SS and ESP...
4897 */
4898 if (uNewCpl != pVCpu->iem.s.uCpl)
4899 {
4900 RTSEL NewSS;
4901 uint32_t uNewEsp;
4902 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4903 if (rcStrict != VINF_SUCCESS)
4904 return rcStrict;
4905
4906 IEMSELDESC DescSS;
4907 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4908 if (rcStrict != VINF_SUCCESS)
4909 return rcStrict;
4910 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4911 if (!DescSS.Legacy.Gen.u1DefBig)
4912 {
4913 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4914 uNewEsp = (uint16_t)uNewEsp;
4915 }
4916
4917 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4918
4919 /* Check that there is sufficient space for the stack frame. */
4920 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4921 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4922 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4923 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
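 /* In other words: a non-V8086 frame holds EIP, CS, EFLAGS and the old SS:ESP
    (5 entries, 6 with an error code), while a V8086 frame additionally holds
    ES, DS, FS and GS (9 or 10 entries); each entry is 4 bytes for a 32-bit
    gate and 2 bytes for a 16-bit one, matching the pushes below. */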
4924
4925 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4926 {
4927 if ( uNewEsp - 1 > cbLimitSS
4928 || uNewEsp < cbStackFrame)
4929 {
4930 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4931 u8Vector, NewSS, uNewEsp, cbStackFrame));
4932 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4933 }
4934 }
4935 else
4936 {
4937 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4938 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4939 {
4940 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4941 u8Vector, NewSS, uNewEsp, cbStackFrame));
4942 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4943 }
4944 }
4945
4946 /*
4947 * Start making changes.
4948 */
4949
4950 /* Set the new CPL so that stack accesses use it. */
4951 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4952 pVCpu->iem.s.uCpl = uNewCpl;
4953
4954 /* Create the stack frame. */
4955 RTPTRUNION uStackFrame;
4956 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4957 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4958 if (rcStrict != VINF_SUCCESS)
4959 return rcStrict;
4960 void * const pvStackFrame = uStackFrame.pv;
4961 if (f32BitGate)
4962 {
4963 if (fFlags & IEM_XCPT_FLAGS_ERR)
4964 *uStackFrame.pu32++ = uErr;
4965 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4966 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4967 uStackFrame.pu32[2] = fEfl;
4968 uStackFrame.pu32[3] = pCtx->esp;
4969 uStackFrame.pu32[4] = pCtx->ss.Sel;
4970 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
4971 if (fEfl & X86_EFL_VM)
4972 {
4973 uStackFrame.pu32[1] = pCtx->cs.Sel;
4974 uStackFrame.pu32[5] = pCtx->es.Sel;
4975 uStackFrame.pu32[6] = pCtx->ds.Sel;
4976 uStackFrame.pu32[7] = pCtx->fs.Sel;
4977 uStackFrame.pu32[8] = pCtx->gs.Sel;
4978 }
4979 }
4980 else
4981 {
4982 if (fFlags & IEM_XCPT_FLAGS_ERR)
4983 *uStackFrame.pu16++ = uErr;
4984 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4985 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4986 uStackFrame.pu16[2] = fEfl;
4987 uStackFrame.pu16[3] = pCtx->sp;
4988 uStackFrame.pu16[4] = pCtx->ss.Sel;
4989 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
4990 if (fEfl & X86_EFL_VM)
4991 {
4992 uStackFrame.pu16[1] = pCtx->cs.Sel;
4993 uStackFrame.pu16[5] = pCtx->es.Sel;
4994 uStackFrame.pu16[6] = pCtx->ds.Sel;
4995 uStackFrame.pu16[7] = pCtx->fs.Sel;
4996 uStackFrame.pu16[8] = pCtx->gs.Sel;
4997 }
4998 }
4999 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5000 if (rcStrict != VINF_SUCCESS)
5001 return rcStrict;
5002
5003 /* Mark the selectors 'accessed' (hope this is the correct time). */
5004 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5005 * after pushing the stack frame? (Write protect the gdt + stack to
5006 * find out.) */
5007 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5008 {
5009 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5010 if (rcStrict != VINF_SUCCESS)
5011 return rcStrict;
5012 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5013 }
5014
5015 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5016 {
5017 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5018 if (rcStrict != VINF_SUCCESS)
5019 return rcStrict;
5020 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5021 }
5022
5023 /*
5024 * Start committing the register changes (joins with the DPL=CPL branch).
5025 */
5026 pCtx->ss.Sel = NewSS;
5027 pCtx->ss.ValidSel = NewSS;
5028 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5029 pCtx->ss.u32Limit = cbLimitSS;
5030 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5031 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5032 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5033 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5034 * SP is loaded).
5035 * Need to check the other combinations too:
5036 * - 16-bit TSS, 32-bit handler
5037 * - 32-bit TSS, 16-bit handler */
5038 if (!pCtx->ss.Attr.n.u1DefBig)
5039 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
5040 else
5041 pCtx->rsp = uNewEsp - cbStackFrame;
5042
5043 if (fEfl & X86_EFL_VM)
5044 {
5045 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
5046 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
5047 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
5048 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
5049 }
5050 }
5051 /*
5052 * Same privilege, no stack change and smaller stack frame.
5053 */
5054 else
5055 {
5056 uint64_t uNewRsp;
5057 RTPTRUNION uStackFrame;
5058 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
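 /* I.e. just EIP/IP, CS and (E)FLAGS, plus the error code when present;
    entries are again 4 bytes for a 32-bit gate and 2 bytes for a 16-bit one. */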
5059 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5060 if (rcStrict != VINF_SUCCESS)
5061 return rcStrict;
5062 void * const pvStackFrame = uStackFrame.pv;
5063
5064 if (f32BitGate)
5065 {
5066 if (fFlags & IEM_XCPT_FLAGS_ERR)
5067 *uStackFrame.pu32++ = uErr;
5068 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5069 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5070 uStackFrame.pu32[2] = fEfl;
5071 }
5072 else
5073 {
5074 if (fFlags & IEM_XCPT_FLAGS_ERR)
5075 *uStackFrame.pu16++ = uErr;
5076 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5077 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5078 uStackFrame.pu16[2] = fEfl;
5079 }
5080 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5081 if (rcStrict != VINF_SUCCESS)
5082 return rcStrict;
5083
5084 /* Mark the CS selector as 'accessed'. */
5085 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5086 {
5087 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5088 if (rcStrict != VINF_SUCCESS)
5089 return rcStrict;
5090 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5091 }
5092
5093 /*
5094 * Start committing the register changes (joins with the other branch).
5095 */
5096 pCtx->rsp = uNewRsp;
5097 }
5098
5099 /* ... register committing continues. */
5100 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5101 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5102 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5103 pCtx->cs.u32Limit = cbLimitCS;
5104 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5105 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5106
5107 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5108 fEfl &= ~fEflToClear;
5109 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5110
5111 if (fFlags & IEM_XCPT_FLAGS_CR2)
5112 pCtx->cr2 = uCr2;
5113
5114 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5115 iemRaiseXcptAdjustState(pCtx, u8Vector);
5116
5117 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5118}
5119
5120
5121/**
5122 * Implements exceptions and interrupts for long mode.
5123 *
5124 * @returns VBox strict status code.
5125 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5126 * @param pCtx The CPU context.
5127 * @param cbInstr The number of bytes to offset rIP by in the return
5128 * address.
5129 * @param u8Vector The interrupt / exception vector number.
5130 * @param fFlags The flags.
5131 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5132 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5133 */
5134IEM_STATIC VBOXSTRICTRC
5135iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5136 PCPUMCTX pCtx,
5137 uint8_t cbInstr,
5138 uint8_t u8Vector,
5139 uint32_t fFlags,
5140 uint16_t uErr,
5141 uint64_t uCr2)
5142{
5143 /*
5144 * Read the IDT entry.
5145 */
5146 uint16_t offIdt = (uint16_t)u8Vector << 4;
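 /* Long mode IDT gate descriptors are 16 bytes each, hence the shift by 4;
    the low and high quadwords are fetched separately below. */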
5147 if (pCtx->idtr.cbIdt < offIdt + 7)
5148 {
5149 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
5150 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5151 }
5152 X86DESC64 Idte;
5153 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
5154 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5155 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
5156 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5157 return rcStrict;
5158 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5159 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5160 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5161
5162 /*
5163 * Check the descriptor type, DPL and such.
5164 * ASSUMES this is done in the same order as described for call-gate calls.
5165 */
5166 if (Idte.Gate.u1DescType)
5167 {
5168 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5169 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5170 }
5171 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5172 switch (Idte.Gate.u4Type)
5173 {
5174 case AMD64_SEL_TYPE_SYS_INT_GATE:
5175 fEflToClear |= X86_EFL_IF;
5176 break;
5177 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5178 break;
5179
5180 default:
5181 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5182 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5183 }
5184
5185 /* Check DPL against CPL if applicable. */
5186 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5187 {
5188 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5189 {
5190 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5191 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5192 }
5193 }
5194
5195 /* Is it there? */
5196 if (!Idte.Gate.u1Present)
5197 {
5198 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5199 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5200 }
5201
5202 /* A null CS is bad. */
5203 RTSEL NewCS = Idte.Gate.u16Sel;
5204 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5205 {
5206 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5207 return iemRaiseGeneralProtectionFault0(pVCpu);
5208 }
5209
5210 /* Fetch the descriptor for the new CS. */
5211 IEMSELDESC DescCS;
5212 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5213 if (rcStrict != VINF_SUCCESS)
5214 {
5215 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5216 return rcStrict;
5217 }
5218
5219 /* Must be a 64-bit code segment. */
5220 if (!DescCS.Long.Gen.u1DescType)
5221 {
5222 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5223 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5224 }
5225 if ( !DescCS.Long.Gen.u1Long
5226 || DescCS.Long.Gen.u1DefBig
5227 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5228 {
5229 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5230 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5231 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5232 }
5233
5234 /* Don't allow lowering the privilege level. For non-conforming CS
5235 selectors, the CS.DPL sets the privilege level the trap/interrupt
5236 handler runs at. For conforming CS selectors, the CPL remains
5237 unchanged, but the CS.DPL must be <= CPL. */
5238 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5239 * when CPU in Ring-0. Result \#GP? */
5240 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5241 {
5242 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5243 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5244 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5245 }
5246
5247
5248 /* Make sure the selector is present. */
5249 if (!DescCS.Legacy.Gen.u1Present)
5250 {
5251 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5252 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5253 }
5254
5255 /* Check that the new RIP is canonical. */
5256 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5257 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5258 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5259 if (!IEM_IS_CANONICAL(uNewRip))
5260 {
5261 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5262 return iemRaiseGeneralProtectionFault0(pVCpu);
5263 }
5264
5265 /*
5266 * If the privilege level changes or if the IST isn't zero, we need to get
5267 * a new stack from the TSS.
5268 */
5269 uint64_t uNewRsp;
5270 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5271 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5272 if ( uNewCpl != pVCpu->iem.s.uCpl
5273 || Idte.Gate.u3IST != 0)
5274 {
5275 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5276 if (rcStrict != VINF_SUCCESS)
5277 return rcStrict;
5278 }
5279 else
5280 uNewRsp = pCtx->rsp;
5281 uNewRsp &= ~(uint64_t)0xf;
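 /* In 64-bit mode the CPU aligns the stack pointer down to a 16 byte boundary
    before pushing the interrupt frame, whether or not the stack was switched. */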
5282
5283 /*
5284 * Calc the flag image to push.
5285 */
5286 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
5287 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5288 fEfl &= ~X86_EFL_RF;
5289 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
5290 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5291
5292 /*
5293 * Start making changes.
5294 */
5295 /* Set the new CPL so that stack accesses use it. */
5296 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5297 pVCpu->iem.s.uCpl = uNewCpl;
5298
5299 /* Create the stack frame. */
5300 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
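 /* Five quadwords (RIP, CS, RFLAGS, RSP and SS), plus one more when an error code is pushed. */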
5301 RTPTRUNION uStackFrame;
5302 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5303 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5304 if (rcStrict != VINF_SUCCESS)
5305 return rcStrict;
5306 void * const pvStackFrame = uStackFrame.pv;
5307
5308 if (fFlags & IEM_XCPT_FLAGS_ERR)
5309 *uStackFrame.pu64++ = uErr;
5310 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
5311 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5312 uStackFrame.pu64[2] = fEfl;
5313 uStackFrame.pu64[3] = pCtx->rsp;
5314 uStackFrame.pu64[4] = pCtx->ss.Sel;
5315 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5316 if (rcStrict != VINF_SUCCESS)
5317 return rcStrict;
5318
5319 /* Mark the CS selector 'accessed' (hope this is the correct time). */
5320 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5321 * after pushing the stack frame? (Write protect the gdt + stack to
5322 * find out.) */
5323 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5324 {
5325 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5326 if (rcStrict != VINF_SUCCESS)
5327 return rcStrict;
5328 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5329 }
5330
5331 /*
5332 * Start committing the register changes.
5333 */
5334 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5335 * hidden registers when interrupting 32-bit or 16-bit code! */
5336 if (uNewCpl != uOldCpl)
5337 {
5338 pCtx->ss.Sel = 0 | uNewCpl;
5339 pCtx->ss.ValidSel = 0 | uNewCpl;
5340 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5341 pCtx->ss.u32Limit = UINT32_MAX;
5342 pCtx->ss.u64Base = 0;
5343 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5344 }
5345 pCtx->rsp = uNewRsp - cbStackFrame;
5346 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5347 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5348 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5349 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5350 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5351 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5352 pCtx->rip = uNewRip;
5353
5354 fEfl &= ~fEflToClear;
5355 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5356
5357 if (fFlags & IEM_XCPT_FLAGS_CR2)
5358 pCtx->cr2 = uCr2;
5359
5360 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5361 iemRaiseXcptAdjustState(pCtx, u8Vector);
5362
5363 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5364}
5365
5366
5367/**
5368 * Implements exceptions and interrupts.
5369 *
5370 * All exceptions and interrupts go through this function!
5371 *
5372 * @returns VBox strict status code.
5373 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5374 * @param cbInstr The number of bytes to offset rIP by in the return
5375 * address.
5376 * @param u8Vector The interrupt / exception vector number.
5377 * @param fFlags The flags.
5378 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5379 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5380 */
5381DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5382iemRaiseXcptOrInt(PVMCPU pVCpu,
5383 uint8_t cbInstr,
5384 uint8_t u8Vector,
5385 uint32_t fFlags,
5386 uint16_t uErr,
5387 uint64_t uCr2)
5388{
5389 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5390#ifdef IN_RING0
5391 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5392 AssertRCReturn(rc, rc);
5393#endif
5394
5395#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5396 /*
5397 * Flush prefetch buffer
5398 */
5399 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5400#endif
5401
5402 /*
5403 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5404 */
5405 if ( pCtx->eflags.Bits.u1VM
5406 && pCtx->eflags.Bits.u2IOPL != 3
5407 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5408 && (pCtx->cr0 & X86_CR0_PE) )
5409 {
5410 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5411 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5412 u8Vector = X86_XCPT_GP;
5413 uErr = 0;
5414 }
5415#ifdef DBGFTRACE_ENABLED
5416 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5417 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5418 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5419#endif
5420
5421#ifdef VBOX_WITH_NESTED_HWVIRT
5422 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
5423 {
5424 /*
5425 * If the event is being injected as part of VMRUN, it isn't subject to event
5426 * intercepts in the nested-guest. However, secondary exceptions that occur
5427 * during injection of any event -are- subject to exception intercepts.
5428 * See AMD spec. 15.20 "Event Injection".
5429 */
5430 if (!pCtx->hwvirt.svm.fInterceptEvents)
5431 pCtx->hwvirt.svm.fInterceptEvents = 1;
5432 else
5433 {
5434 /*
5435 * Check and handle if the event being raised is intercepted.
5436 */
5437 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, pCtx, u8Vector, fFlags, uErr, uCr2);
5438 if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
5439 return rcStrict0;
5440 }
5441 }
5442#endif /* VBOX_WITH_NESTED_HWVIRT */
5443
5444 /*
5445 * Do recursion accounting.
5446 */
5447 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5448 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5449 if (pVCpu->iem.s.cXcptRecursions == 0)
5450 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5451 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5452 else
5453 {
5454 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5455 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5456 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5457
5458 if (pVCpu->iem.s.cXcptRecursions >= 3)
5459 {
5460#ifdef DEBUG_bird
5461 AssertFailed();
5462#endif
5463 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5464 }
5465
5466 /*
5467 * Evaluate the sequence of recurring events.
5468 */
5469 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5470 NULL /* pXcptRaiseInfo */);
5471 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5472 { /* likely */ }
5473 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5474 {
5475 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5476 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5477 u8Vector = X86_XCPT_DF;
5478 uErr = 0;
5479 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5480 if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5481 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5482 }
5483 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5484 {
5485 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5486 return iemInitiateCpuShutdown(pVCpu);
5487 }
5488 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5489 {
5490 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5491 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5492 if (!CPUMIsGuestInNestedHwVirtMode(pCtx))
5493 return VERR_EM_GUEST_CPU_HANG;
5494 }
5495 else
5496 {
5497 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5498 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5499 return VERR_IEM_IPE_9;
5500 }
5501
5502 /*
5503 * The 'EXT' bit is set when an exception occurs during delivery of an external
5504 * event (such as an interrupt or an earlier exception)[1]. A privileged software
5505 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5506 * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
5507 *
5508 * [1] - Intel spec. 6.13 "Error Code"
5509 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5510 * [3] - Intel Instruction reference for INT n.
5511 */
5512 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5513 && (fFlags & IEM_XCPT_FLAGS_ERR)
5514 && u8Vector != X86_XCPT_PF
5515 && u8Vector != X86_XCPT_DF)
5516 {
5517 uErr |= X86_TRAP_ERR_EXTERNAL;
5518 }
5519 }
5520
5521 pVCpu->iem.s.cXcptRecursions++;
5522 pVCpu->iem.s.uCurXcpt = u8Vector;
5523 pVCpu->iem.s.fCurXcpt = fFlags;
5524 pVCpu->iem.s.uCurXcptErr = uErr;
5525 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5526
5527 /*
5528 * Extensive logging.
5529 */
5530#if defined(LOG_ENABLED) && defined(IN_RING3)
5531 if (LogIs3Enabled())
5532 {
5533 PVM pVM = pVCpu->CTX_SUFF(pVM);
5534 char szRegs[4096];
5535 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5536 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5537 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5538 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5539 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5540 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5541 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5542 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5543 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5544 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5545 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5546 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5547 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5548 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5549 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5550 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5551 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5552 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5553 " efer=%016VR{efer}\n"
5554 " pat=%016VR{pat}\n"
5555 " sf_mask=%016VR{sf_mask}\n"
5556 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5557 " lstar=%016VR{lstar}\n"
5558 " star=%016VR{star} cstar=%016VR{cstar}\n"
5559 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5560 );
5561
5562 char szInstr[256];
5563 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5564 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5565 szInstr, sizeof(szInstr), NULL);
5566 Log3(("%s%s\n", szRegs, szInstr));
5567 }
5568#endif /* LOG_ENABLED */
5569
5570 /*
5571 * Call the mode specific worker function.
5572 */
5573 VBOXSTRICTRC rcStrict;
5574 if (!(pCtx->cr0 & X86_CR0_PE))
5575 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5576 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5577 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5578 else
5579 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5580
5581 /* Flush the prefetch buffer. */
5582#ifdef IEM_WITH_CODE_TLB
5583 pVCpu->iem.s.pbInstrBuf = NULL;
5584#else
5585 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5586#endif
5587
5588 /*
5589 * Unwind.
5590 */
5591 pVCpu->iem.s.cXcptRecursions--;
5592 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5593 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5594 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5595 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5596 return rcStrict;
5597}
5598
5599#ifdef IEM_WITH_SETJMP
5600/**
5601 * See iemRaiseXcptOrInt. Will not return.
5602 */
5603IEM_STATIC DECL_NO_RETURN(void)
5604iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5605 uint8_t cbInstr,
5606 uint8_t u8Vector,
5607 uint32_t fFlags,
5608 uint16_t uErr,
5609 uint64_t uCr2)
5610{
5611 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5612 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5613}
5614#endif
5615
5616
5617/** \#DE - 00. */
5618DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5619{
5620 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5621}
5622
5623
5624/** \#DB - 01.
5625 * @note This automatically clears DR7.GD. */
5626DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5627{
5628 /** @todo set/clear RF. */
5629 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5630 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5631}
5632
5633
5634/** \#BR - 05. */
5635DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5636{
5637 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5638}
5639
5640
5641/** \#UD - 06. */
5642DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5643{
5644 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5645}
5646
5647
5648/** \#NM - 07. */
5649DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5650{
5651 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5652}
5653
5654
5655/** \#TS(err) - 0a. */
5656DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5657{
5658 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5659}
5660
5661
5662/** \#TS(tr) - 0a. */
5663DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5664{
5665 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5666 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5667}
5668
5669
5670/** \#TS(0) - 0a. */
5671DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5672{
5673 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5674 0, 0);
5675}
5676
5677
5678/** \#TS(err) - 0a. */
5679DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5680{
5681 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5682 uSel & X86_SEL_MASK_OFF_RPL, 0);
5683}
5684
5685
5686/** \#NP(err) - 0b. */
5687DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5688{
5689 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5690}
5691
5692
5693/** \#NP(sel) - 0b. */
5694DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5695{
5696 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5697 uSel & ~X86_SEL_RPL, 0);
5698}
5699
5700
5701/** \#SS(seg) - 0c. */
5702DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5703{
5704 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5705 uSel & ~X86_SEL_RPL, 0);
5706}
5707
5708
5709/** \#SS(err) - 0c. */
5710DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5711{
5712 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5713}
5714
5715
5716/** \#GP(n) - 0d. */
5717DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5718{
5719 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5720}
5721
5722
5723/** \#GP(0) - 0d. */
5724DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5725{
5726 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5727}
5728
5729#ifdef IEM_WITH_SETJMP
5730/** \#GP(0) - 0d. */
5731DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5732{
5733 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5734}
5735#endif
5736
5737
5738/** \#GP(sel) - 0d. */
5739DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5740{
5741 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5742 Sel & ~X86_SEL_RPL, 0);
5743}
5744
5745
5746/** \#GP(0) - 0d. */
5747DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5748{
5749 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5750}
5751
5752
5753/** \#GP(sel) - 0d. */
5754DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5755{
5756 NOREF(iSegReg); NOREF(fAccess);
5757 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5758 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5759}
5760
5761#ifdef IEM_WITH_SETJMP
5762/** \#GP(sel) - 0d, longjmp. */
5763DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5764{
5765 NOREF(iSegReg); NOREF(fAccess);
5766 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5767 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5768}
5769#endif
5770
5771/** \#GP(sel) - 0d. */
5772DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5773{
5774 NOREF(Sel);
5775 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5776}
5777
5778#ifdef IEM_WITH_SETJMP
5779/** \#GP(sel) - 0d, longjmp. */
5780DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5781{
5782 NOREF(Sel);
5783 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5784}
5785#endif
5786
5787
5788/** \#GP(sel) - 0d. */
5789DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5790{
5791 NOREF(iSegReg); NOREF(fAccess);
5792 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5793}
5794
5795#ifdef IEM_WITH_SETJMP
5796/** \#GP(sel) - 0d, longjmp. */
5797DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5798 uint32_t fAccess)
5799{
5800 NOREF(iSegReg); NOREF(fAccess);
5801 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5802}
5803#endif
5804
5805
5806/** \#PF(n) - 0e. */
5807DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5808{
5809 uint16_t uErr;
5810 switch (rc)
5811 {
5812 case VERR_PAGE_NOT_PRESENT:
5813 case VERR_PAGE_TABLE_NOT_PRESENT:
5814 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5815 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5816 uErr = 0;
5817 break;
5818
5819 default:
5820 AssertMsgFailed(("%Rrc\n", rc));
5821 RT_FALL_THRU();
5822 case VERR_ACCESS_DENIED:
5823 uErr = X86_TRAP_PF_P;
5824 break;
5825
5826 /** @todo reserved */
5827 }
5828
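 /* Standard #PF error code bits assembled below: P (bit 0) = protection
    violation vs. not-present, W/R (bit 1) = write access, U/S (bit 2) =
    user-mode access, I/D (bit 4) = instruction fetch (only reported when
    PAE/NXE paging is active, as checked below). */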
5829 if (pVCpu->iem.s.uCpl == 3)
5830 uErr |= X86_TRAP_PF_US;
5831
5832 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5833 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5834 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5835 uErr |= X86_TRAP_PF_ID;
5836
5837#if 0 /* This is so much nonsense, really. Why was it done like that? */
5838 /* Note! RW access callers reporting a WRITE protection fault will clear
5839 the READ flag before calling. So, read-modify-write accesses (RW)
5840 can safely be reported as READ faults. */
5841 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5842 uErr |= X86_TRAP_PF_RW;
5843#else
5844 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5845 {
5846 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5847 uErr |= X86_TRAP_PF_RW;
5848 }
5849#endif
5850
5851 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5852 uErr, GCPtrWhere);
5853}
5854
5855#ifdef IEM_WITH_SETJMP
5856/** \#PF(n) - 0e, longjmp. */
5857IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5858{
5859 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5860}
5861#endif
5862
5863
5864/** \#MF(0) - 10. */
5865DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5866{
5867 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5868}
5869
5870
5871/** \#AC(0) - 11. */
5872DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5873{
5874 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5875}
5876
5877
5878/**
5879 * Macro for calling iemCImplRaiseDivideError().
5880 *
5881 * This enables us to add/remove arguments and force different levels of
5882 * inlining as we wish.
5883 *
5884 * @return Strict VBox status code.
5885 */
5886#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5887IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5888{
5889 NOREF(cbInstr);
5890 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5891}
5892
5893
5894/**
5895 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5896 *
5897 * This enables us to add/remove arguments and force different levels of
5898 * inlining as we wish.
5899 *
5900 * @return Strict VBox status code.
5901 */
5902#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5903IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5904{
5905 NOREF(cbInstr);
5906 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5907}
5908
5909
5910/**
5911 * Macro for calling iemCImplRaiseInvalidOpcode().
5912 *
5913 * This enables us to add/remove arguments and force different levels of
5914 * inlining as we wish.
5915 *
5916 * @return Strict VBox status code.
5917 */
5918#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5919IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5920{
5921 NOREF(cbInstr);
5922 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5923}
5924
5925
5926/** @} */
5927
5928
5929/*
5930 *
5931 * Helper routines.
5932 * Helper routines.
5933 * Helper routines.
5934 *
5935 */
5936
5937/**
5938 * Recalculates the effective operand size.
5939 *
5940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5941 */
5942IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5943{
5944 switch (pVCpu->iem.s.enmCpuMode)
5945 {
5946 case IEMMODE_16BIT:
5947 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5948 break;
5949 case IEMMODE_32BIT:
5950 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5951 break;
5952 case IEMMODE_64BIT:
5953 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5954 {
5955 case 0:
5956 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5957 break;
5958 case IEM_OP_PRF_SIZE_OP:
5959 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5960 break;
5961 case IEM_OP_PRF_SIZE_REX_W:
5962 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5963 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5964 break;
5965 }
5966 break;
5967 default:
5968 AssertFailed();
5969 }
5970}
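 /* For example: in 32-bit code a 0x66 prefix selects a 16-bit operand size,
    while in 64-bit code REX.W takes precedence over 0x66 and selects 64 bits,
    as encoded by iemRecalEffOpSize above. */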
5971
5972
5973/**
5974 * Sets the default operand size to 64-bit and recalculates the effective
5975 * operand size.
5976 *
5977 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5978 */
5979IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5980{
5981 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5982 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
5983 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5984 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5985 else
5986 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5987}
5988
5989
5990/*
5991 *
5992 * Common opcode decoders.
5993 * Common opcode decoders.
5994 * Common opcode decoders.
5995 *
5996 */
5997//#include <iprt/mem.h>
5998
5999/**
6000 * Used to add extra details about a stub case.
6001 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6002 */
6003IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6004{
6005#if defined(LOG_ENABLED) && defined(IN_RING3)
6006 PVM pVM = pVCpu->CTX_SUFF(pVM);
6007 char szRegs[4096];
6008 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6009 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6010 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6011 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6012 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6013 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6014 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6015 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6016 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6017 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6018 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6019 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6020 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6021 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6022 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6023 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6024 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6025 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6026 " efer=%016VR{efer}\n"
6027 " pat=%016VR{pat}\n"
6028 " sf_mask=%016VR{sf_mask}\n"
6029 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6030 " lstar=%016VR{lstar}\n"
6031 " star=%016VR{star} cstar=%016VR{cstar}\n"
6032 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6033 );
6034
6035 char szInstr[256];
6036 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6037 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6038 szInstr, sizeof(szInstr), NULL);
6039
6040 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6041#else
6042 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip);
6043#endif
6044}
6045
6046/**
6047 * Complains about a stub.
6048 *
6049 * Providing two versions of this macro, one for daily use and one for use when
6050 * working on IEM.
6051 */
6052#if 0
6053# define IEMOP_BITCH_ABOUT_STUB() \
6054 do { \
6055 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6056 iemOpStubMsg2(pVCpu); \
6057 RTAssertPanic(); \
6058 } while (0)
6059#else
6060# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6061#endif
6062
6063/** Stubs an opcode. */
6064#define FNIEMOP_STUB(a_Name) \
6065 FNIEMOP_DEF(a_Name) \
6066 { \
6067 RT_NOREF_PV(pVCpu); \
6068 IEMOP_BITCH_ABOUT_STUB(); \
6069 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6070 } \
6071 typedef int ignore_semicolon
6072
6073/** Stubs an opcode. */
6074#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6075 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6076 { \
6077 RT_NOREF_PV(pVCpu); \
6078 RT_NOREF_PV(a_Name0); \
6079 IEMOP_BITCH_ABOUT_STUB(); \
6080 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6081 } \
6082 typedef int ignore_semicolon
6083
6084/** Stubs an opcode which currently should raise \#UD. */
6085#define FNIEMOP_UD_STUB(a_Name) \
6086 FNIEMOP_DEF(a_Name) \
6087 { \
6088 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6089 return IEMOP_RAISE_INVALID_OPCODE(); \
6090 } \
6091 typedef int ignore_semicolon
6092
6093/** Stubs an opcode which currently should raise \#UD. */
6094#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6095 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6096 { \
6097 RT_NOREF_PV(pVCpu); \
6098 RT_NOREF_PV(a_Name0); \
6099 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6100 return IEMOP_RAISE_INVALID_OPCODE(); \
6101 } \
6102 typedef int ignore_semicolon
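/* Usage sketch (illustrative name): FNIEMOP_UD_STUB(iemOp_SomeUnimplementedOpcode);
   expands to a decoder function of that name which logs the instruction and
   raises #UD via IEMOP_RAISE_INVALID_OPCODE(). */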
6103
6104
6105
6106/** @name Register Access.
6107 * @{
6108 */
6109
6110/**
6111 * Gets a reference (pointer) to the specified hidden segment register.
6112 *
6113 * @returns Hidden register reference.
6114 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6115 * @param iSegReg The segment register.
6116 */
6117IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6118{
6119 Assert(iSegReg < X86_SREG_COUNT);
6120 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6121 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
6122
6123#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6124 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6125 { /* likely */ }
6126 else
6127 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6128#else
6129 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6130#endif
6131 return pSReg;
6132}
6133
6134
6135/**
6136 * Ensures that the given hidden segment register is up to date.
6137 *
6138 * @returns Hidden register reference.
6139 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6140 * @param pSReg The segment register.
6141 */
6142IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6143{
6144#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6145 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6146 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6147#else
6148 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6149 NOREF(pVCpu);
6150#endif
6151 return pSReg;
6152}
6153
6154
6155/**
6156 * Gets a reference (pointer) to the specified segment register (the selector
6157 * value).
6158 *
6159 * @returns Pointer to the selector variable.
6160 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6161 * @param iSegReg The segment register.
6162 */
6163DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6164{
6165 Assert(iSegReg < X86_SREG_COUNT);
6166 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6167 return &pCtx->aSRegs[iSegReg].Sel;
6168}
6169
6170
6171/**
6172 * Fetches the selector value of a segment register.
6173 *
6174 * @returns The selector value.
6175 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6176 * @param iSegReg The segment register.
6177 */
6178DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6179{
6180 Assert(iSegReg < X86_SREG_COUNT);
6181 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
6182}
6183
6184
6185/**
6186 * Fetches the base address value of a segment register.
6187 *
6188 * @returns The base address value.
6189 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6190 * @param iSegReg The segment register.
6191 */
6192DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6193{
6194 Assert(iSegReg < X86_SREG_COUNT);
6195 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].u64Base;
6196}
6197
6198
6199/**
6200 * Gets a reference (pointer) to the specified general purpose register.
6201 *
6202 * @returns Register reference.
6203 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6204 * @param iReg The general purpose register.
6205 */
6206DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6207{
6208 Assert(iReg < 16);
6209 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6210 return &pCtx->aGRegs[iReg];
6211}
6212
6213
6214/**
6215 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6216 *
6217 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6218 *
6219 * @returns Register reference.
6220 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6221 * @param iReg The register.
6222 */
6223DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6224{
6225 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6226 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6227 {
6228 Assert(iReg < 16);
6229 return &pCtx->aGRegs[iReg].u8;
6230 }
6231 /* high 8-bit register. */
6232 Assert(iReg < 8);
6233 return &pCtx->aGRegs[iReg & 3].bHi;
6234}
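/* Worked example for iemGRegRefU8: without a REX prefix, encoding 4 selects AH
   and resolves to &aGRegs[0].bHi, i.e. bits 8 thru 15 of RAX; with any REX
   prefix present the same encoding selects SPL and the first branch returns
   &aGRegs[4].u8 instead. */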
6235
6236
6237/**
6238 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6239 *
6240 * @returns Register reference.
6241 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6242 * @param iReg The register.
6243 */
6244DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6245{
6246 Assert(iReg < 16);
6247 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6248 return &pCtx->aGRegs[iReg].u16;
6249}
6250
6251
6252/**
6253 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6254 *
6255 * @returns Register reference.
6256 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6257 * @param iReg The register.
6258 */
6259DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6260{
6261 Assert(iReg < 16);
6262 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6263 return &pCtx->aGRegs[iReg].u32;
6264}
6265
6266
6267/**
6268 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6269 *
6270 * @returns Register reference.
6271 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6272 * @param iReg The register.
6273 */
6274DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6275{
6276 Assert(iReg < 16);
6277 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6278 return &pCtx->aGRegs[iReg].u64;
6279}
6280
6281
6282/**
6283 * Gets a reference (pointer) to the specified segment register's base address.
6284 *
6285 * @returns Segment register base address reference.
6286 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6287 * @param iSegReg The segment selector.
6288 */
6289DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6290{
6291 Assert(iSegReg < X86_SREG_COUNT);
6292 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6293 return &pCtx->aSRegs[iSegReg].u64Base;
6294}
6295
6296
6297/**
6298 * Fetches the value of an 8-bit general purpose register.
6299 *
6300 * @returns The register value.
6301 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6302 * @param iReg The register.
6303 */
6304DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6305{
6306 return *iemGRegRefU8(pVCpu, iReg);
6307}
6308
6309
6310/**
6311 * Fetches the value of a 16-bit general purpose register.
6312 *
6313 * @returns The register value.
6314 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6315 * @param iReg The register.
6316 */
6317DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6318{
6319 Assert(iReg < 16);
6320 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
6321}
6322
6323
6324/**
6325 * Fetches the value of a 32-bit general purpose register.
6326 *
6327 * @returns The register value.
6328 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6329 * @param iReg The register.
6330 */
6331DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6332{
6333 Assert(iReg < 16);
6334 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
6335}
6336
6337
6338/**
6339 * Fetches the value of a 64-bit general purpose register.
6340 *
6341 * @returns The register value.
6342 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6343 * @param iReg The register.
6344 */
6345DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6346{
6347 Assert(iReg < 16);
6348 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
6349}
6350
6351
6352/**
6353 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6354 *
6355 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6356 * segment limit.
6357 *
6358 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6359 * @param offNextInstr The offset of the next instruction.
6360 */
6361IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6362{
6363 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6364 switch (pVCpu->iem.s.enmEffOpSize)
6365 {
6366 case IEMMODE_16BIT:
6367 {
6368 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6369 if ( uNewIp > pCtx->cs.u32Limit
6370 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6371 return iemRaiseGeneralProtectionFault0(pVCpu);
6372 pCtx->rip = uNewIp;
6373 break;
6374 }
6375
6376 case IEMMODE_32BIT:
6377 {
6378 Assert(pCtx->rip <= UINT32_MAX);
6379 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6380
6381 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6382 if (uNewEip > pCtx->cs.u32Limit)
6383 return iemRaiseGeneralProtectionFault0(pVCpu);
6384 pCtx->rip = uNewEip;
6385 break;
6386 }
6387
6388 case IEMMODE_64BIT:
6389 {
6390 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6391
6392 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6393 if (!IEM_IS_CANONICAL(uNewRip))
6394 return iemRaiseGeneralProtectionFault0(pVCpu);
6395 pCtx->rip = uNewRip;
6396 break;
6397 }
6398
6399 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6400 }
6401
6402 pCtx->eflags.Bits.u1RF = 0;
6403
6404#ifndef IEM_WITH_CODE_TLB
6405 /* Flush the prefetch buffer. */
6406 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6407#endif
6408
6409 return VINF_SUCCESS;
6410}
6411
6412
6413/**
6414 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6415 *
6416 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6417 * segment limit.
6418 *
6419 * @returns Strict VBox status code.
6420 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6421 * @param offNextInstr The offset of the next instruction.
6422 */
6423IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6424{
6425 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6426 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6427
6428 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6429 if ( uNewIp > pCtx->cs.u32Limit
6430 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6431 return iemRaiseGeneralProtectionFault0(pVCpu);
6432 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6433 pCtx->rip = uNewIp;
6434 pCtx->eflags.Bits.u1RF = 0;
6435
6436#ifndef IEM_WITH_CODE_TLB
6437 /* Flush the prefetch buffer. */
6438 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6439#endif
6440
6441 return VINF_SUCCESS;
6442}
6443
6444
6445/**
6446 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6447 *
6448 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6449 * segment limit.
6450 *
6451 * @returns Strict VBox status code.
6452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6453 * @param offNextInstr The offset of the next instruction.
6454 */
6455IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6456{
6457 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6458 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6459
6460 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6461 {
6462 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6463
6464 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6465 if (uNewEip > pCtx->cs.u32Limit)
6466 return iemRaiseGeneralProtectionFault0(pVCpu);
6467 pCtx->rip = uNewEip;
6468 }
6469 else
6470 {
6471 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6472
6473 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6474 if (!IEM_IS_CANONICAL(uNewRip))
6475 return iemRaiseGeneralProtectionFault0(pVCpu);
6476 pCtx->rip = uNewRip;
6477 }
6478 pCtx->eflags.Bits.u1RF = 0;
6479
6480#ifndef IEM_WITH_CODE_TLB
6481 /* Flush the prefetch buffer. */
6482 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6483#endif
6484
6485 return VINF_SUCCESS;
6486}
6487
6488
6489/**
6490 * Performs a near jump to the specified address.
6491 *
6492 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6493 * segment limit.
6494 *
6495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6496 * @param uNewRip The new RIP value.
6497 */
6498IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6499{
6500 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6501 switch (pVCpu->iem.s.enmEffOpSize)
6502 {
6503 case IEMMODE_16BIT:
6504 {
6505 Assert(uNewRip <= UINT16_MAX);
6506 if ( uNewRip > pCtx->cs.u32Limit
6507 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6508 return iemRaiseGeneralProtectionFault0(pVCpu);
6509 /** @todo Test 16-bit jump in 64-bit mode. */
6510 pCtx->rip = uNewRip;
6511 break;
6512 }
6513
6514 case IEMMODE_32BIT:
6515 {
6516 Assert(uNewRip <= UINT32_MAX);
6517 Assert(pCtx->rip <= UINT32_MAX);
6518 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6519
6520 if (uNewRip > pCtx->cs.u32Limit)
6521 return iemRaiseGeneralProtectionFault0(pVCpu);
6522 pCtx->rip = uNewRip;
6523 break;
6524 }
6525
6526 case IEMMODE_64BIT:
6527 {
6528 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6529
6530 if (!IEM_IS_CANONICAL(uNewRip))
6531 return iemRaiseGeneralProtectionFault0(pVCpu);
6532 pCtx->rip = uNewRip;
6533 break;
6534 }
6535
6536 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6537 }
6538
6539 pCtx->eflags.Bits.u1RF = 0;
6540
6541#ifndef IEM_WITH_CODE_TLB
6542 /* Flush the prefetch buffer. */
6543 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6544#endif
6545
6546 return VINF_SUCCESS;
6547}
6548
6549
6550/**
6551 * Get the address of the top of the stack.
6552 *
6553 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6554 * @param pCtx The CPU context which SP/ESP/RSP should be
6555 * read.
6556 */
6557DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6558{
6559 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6560 return pCtx->rsp;
6561 if (pCtx->ss.Attr.n.u1DefBig)
6562 return pCtx->esp;
6563 return pCtx->sp;
6564}
6565
6566
6567/**
6568 * Updates the RIP/EIP/IP to point to the next instruction.
6569 *
6570 * This function leaves the EFLAGS.RF flag alone.
6571 *
6572 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6573 * @param cbInstr The number of bytes to add.
6574 */
6575IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6576{
6577 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6578 switch (pVCpu->iem.s.enmCpuMode)
6579 {
6580 case IEMMODE_16BIT:
6581 Assert(pCtx->rip <= UINT16_MAX);
6582 pCtx->eip += cbInstr;
6583 pCtx->eip &= UINT32_C(0xffff);
6584 break;
6585
6586 case IEMMODE_32BIT:
6587 pCtx->eip += cbInstr;
6588 Assert(pCtx->rip <= UINT32_MAX);
6589 break;
6590
6591 case IEMMODE_64BIT:
6592 pCtx->rip += cbInstr;
6593 break;
6594 default: AssertFailed();
6595 }
6596}
6597
6598
6599#if 0
6600/**
6601 * Updates the RIP/EIP/IP to point to the next instruction.
6602 *
6603 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6604 */
6605IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6606{
6607 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6608}
6609#endif
6610
6611
6612
6613/**
6614 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6615 *
6616 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6617 * @param cbInstr The number of bytes to add.
6618 */
6619IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6620{
6621 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6622
6623 pCtx->eflags.Bits.u1RF = 0;
6624
6625 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6626#if ARCH_BITS >= 64
6627 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6628 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6629 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6630#else
6631 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6632 pCtx->rip += cbInstr;
6633 else
6634 pCtx->eip += cbInstr;
6635#endif
6636}
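/* The mask table above is indexed by IEMMODE (16-bit = 0, 32-bit = 1,
   64-bit = 2, see the AssertCompile): outside long mode the updated value is
   truncated to 32 bits, while 64-bit mode keeps the full RIP.  16-bit mode
   shares the 32-bit mask, presumably because IP is already expected to stay
   within the segment limit (cf. the UINT16_MAX assertion in
   iemRegAddToRipKeepRF above). */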
6637
6638
6639/**
6640 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6641 *
6642 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6643 */
6644IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6645{
6646 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6647}
6648
6649
6650/**
6651 * Adds to the stack pointer.
6652 *
6653 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6654 * @param pCtx The CPU context which SP/ESP/RSP should be
6655 * updated.
6656 * @param cbToAdd The number of bytes to add (8-bit!).
6657 */
6658DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6659{
6660 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6661 pCtx->rsp += cbToAdd;
6662 else if (pCtx->ss.Attr.n.u1DefBig)
6663 pCtx->esp += cbToAdd;
6664 else
6665 pCtx->sp += cbToAdd;
6666}
6667
6668
6669/**
6670 * Subtracts from the stack pointer.
6671 *
6672 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6673 * @param pCtx The CPU context which SP/ESP/RSP should be
6674 * updated.
6675 * @param cbToSub The number of bytes to subtract (8-bit!).
6676 */
6677DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6678{
6679 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6680 pCtx->rsp -= cbToSub;
6681 else if (pCtx->ss.Attr.n.u1DefBig)
6682 pCtx->esp -= cbToSub;
6683 else
6684 pCtx->sp -= cbToSub;
6685}
6686
6687
6688/**
6689 * Adds to the temporary stack pointer.
6690 *
6691 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6692 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6693 * @param cbToAdd The number of bytes to add (16-bit).
6694 * @param pCtx Where to get the current stack mode.
6695 */
6696DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6697{
6698 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6699 pTmpRsp->u += cbToAdd;
6700 else if (pCtx->ss.Attr.n.u1DefBig)
6701 pTmpRsp->DWords.dw0 += cbToAdd;
6702 else
6703 pTmpRsp->Words.w0 += cbToAdd;
6704}
6705
6706
6707/**
6708 * Subtracts from the temporary stack pointer.
6709 *
6710 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6711 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6712 * @param cbToSub The number of bytes to subtract.
6713 * @param pCtx Where to get the current stack mode.
6714 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6715 * expecting that.
6716 */
6717DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6718{
6719 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6720 pTmpRsp->u -= cbToSub;
6721 else if (pCtx->ss.Attr.n.u1DefBig)
6722 pTmpRsp->DWords.dw0 -= cbToSub;
6723 else
6724 pTmpRsp->Words.w0 -= cbToSub;
6725}
6726
6727
6728/**
6729 * Calculates the effective stack address for a push of the specified size as
6730 * well as the new RSP value (upper bits may be masked).
6731 *
6732 * @returns Effective stack address for the push.
6733 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6734 * @param pCtx Where to get the current stack mode.
6735 * @param cbItem The size of the stack item to push.
6736 * @param puNewRsp Where to return the new RSP value.
6737 */
6738DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6739{
6740 RTUINT64U uTmpRsp;
6741 RTGCPTR GCPtrTop;
6742 uTmpRsp.u = pCtx->rsp;
6743
6744 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6745 GCPtrTop = uTmpRsp.u -= cbItem;
6746 else if (pCtx->ss.Attr.n.u1DefBig)
6747 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6748 else
6749 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6750 *puNewRsp = uTmpRsp.u;
6751 return GCPtrTop;
6752}
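/* Worked example: with a 32-bit stack segment (ss.Attr.n.u1DefBig set),
   rsp=0x00001000 and cbItem=4, only the low dword is decremented, so the
   returned push address is 0x00000ffc and *puNewRsp becomes 0x00000ffc as
   well; in 64-bit mode the full 64-bit RSP is decremented instead. */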
6753
6754
6755/**
6756 * Gets the current stack pointer and calculates the value after a pop of the
6757 * specified size.
6758 *
6759 * @returns Current stack pointer.
6760 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6761 * @param pCtx Where to get the current stack mode.
6762 * @param cbItem The size of the stack item to pop.
6763 * @param puNewRsp Where to return the new RSP value.
6764 */
6765DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6766{
6767 RTUINT64U uTmpRsp;
6768 RTGCPTR GCPtrTop;
6769 uTmpRsp.u = pCtx->rsp;
6770
6771 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6772 {
6773 GCPtrTop = uTmpRsp.u;
6774 uTmpRsp.u += cbItem;
6775 }
6776 else if (pCtx->ss.Attr.n.u1DefBig)
6777 {
6778 GCPtrTop = uTmpRsp.DWords.dw0;
6779 uTmpRsp.DWords.dw0 += cbItem;
6780 }
6781 else
6782 {
6783 GCPtrTop = uTmpRsp.Words.w0;
6784 uTmpRsp.Words.w0 += cbItem;
6785 }
6786 *puNewRsp = uTmpRsp.u;
6787 return GCPtrTop;
6788}
6789
6790
6791/**
6792 * Calculates the effective stack address for a push of the specified size as
6793 * well as the new temporary RSP value (upper bits may be masked).
6794 *
6795 * @returns Effective stack address for the push.
6796 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6797 * @param pCtx Where to get the current stack mode.
6798 * @param pTmpRsp The temporary stack pointer. This is updated.
6799 * @param cbItem The size of the stack item to push.
6800 */
6801DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6802{
6803 RTGCPTR GCPtrTop;
6804
6805 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6806 GCPtrTop = pTmpRsp->u -= cbItem;
6807 else if (pCtx->ss.Attr.n.u1DefBig)
6808 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6809 else
6810 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6811 return GCPtrTop;
6812}
6813
6814
6815/**
6816 * Gets the effective stack address for a pop of the specified size and
6817 * calculates and updates the temporary RSP.
6818 *
6819 * @returns Current stack pointer.
6820 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6821 * @param pCtx Where to get the current stack mode.
6822 * @param pTmpRsp The temporary stack pointer. This is updated.
6823 * @param cbItem The size of the stack item to pop.
6824 */
6825DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6826{
6827 RTGCPTR GCPtrTop;
6828 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6829 {
6830 GCPtrTop = pTmpRsp->u;
6831 pTmpRsp->u += cbItem;
6832 }
6833 else if (pCtx->ss.Attr.n.u1DefBig)
6834 {
6835 GCPtrTop = pTmpRsp->DWords.dw0;
6836 pTmpRsp->DWords.dw0 += cbItem;
6837 }
6838 else
6839 {
6840 GCPtrTop = pTmpRsp->Words.w0;
6841 pTmpRsp->Words.w0 += cbItem;
6842 }
6843 return GCPtrTop;
6844}
6845
6846/** @} */
6847
6848
6849/** @name FPU access and helpers.
6850 *
6851 * @{
6852 */
6853
6854
6855/**
6856 * Hook for preparing to use the host FPU.
6857 *
6858 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6859 *
6860 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6861 */
6862DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6863{
6864#ifdef IN_RING3
6865 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6866#else
6867 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6868#endif
6869}
6870
6871
6872/**
6873 * Hook for preparing to use the host FPU for SSE.
6874 *
6875 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6876 *
6877 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6878 */
6879DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6880{
6881 iemFpuPrepareUsage(pVCpu);
6882}
6883
6884
6885/**
6886 * Hook for preparing to use the host FPU for AVX.
6887 *
6888 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6889 *
6890 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6891 */
6892DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6893{
6894 iemFpuPrepareUsage(pVCpu);
6895}
6896
6897
6898/**
6899 * Hook for actualizing the guest FPU state before the interpreter reads it.
6900 *
6901 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6902 *
6903 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6904 */
6905DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6906{
6907#ifdef IN_RING3
6908 NOREF(pVCpu);
6909#else
6910 CPUMRZFpuStateActualizeForRead(pVCpu);
6911#endif
6912}
6913
6914
6915/**
6916 * Hook for actualizing the guest FPU state before the interpreter changes it.
6917 *
6918 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6919 *
6920 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6921 */
6922DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6923{
6924#ifdef IN_RING3
6925 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6926#else
6927 CPUMRZFpuStateActualizeForChange(pVCpu);
6928#endif
6929}
6930
6931
6932/**
6933 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6934 * only.
6935 *
6936 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6937 *
6938 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6939 */
6940DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6941{
6942#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6943 NOREF(pVCpu);
6944#else
6945 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6946#endif
6947}
6948
6949
6950/**
6951 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6952 * read+write.
6953 *
6954 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6955 *
6956 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6957 */
6958DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6959{
6960#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6961 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6962#else
6963 CPUMRZFpuStateActualizeForChange(pVCpu);
6964#endif
6965}
6966
6967
6968/**
6969 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
6970 * only.
6971 *
6972 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6973 *
6974 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6975 */
6976DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
6977{
6978#ifdef IN_RING3
6979 NOREF(pVCpu);
6980#else
6981 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
6982#endif
6983}
6984
6985
6986/**
6987 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
6988 * read+write.
6989 *
6990 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6991 *
6992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6993 */
6994DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
6995{
6996#ifdef IN_RING3
6997 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6998#else
6999 CPUMRZFpuStateActualizeForChange(pVCpu);
7000#endif
7001}
7002
7003
7004/**
7005 * Stores a QNaN value into a FPU register.
7006 *
7007 * @param pReg Pointer to the register.
7008 */
7009DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7010{
7011 pReg->au32[0] = UINT32_C(0x00000000);
7012 pReg->au32[1] = UINT32_C(0xc0000000);
7013 pReg->au16[4] = UINT16_C(0xffff);
7014}
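/* The three stores above produce the x87 "QNaN floating-point indefinite":
   sign = 1, exponent = 0x7fff, mantissa = 0xc000000000000000, i.e. the 80-bit
   pattern ffff'c000'0000'0000'0000 which is the value the FPU produces for
   masked invalid operations. */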
7015
7016
7017/**
7018 * Updates the FOP, FPU.CS and FPUIP registers.
7019 *
7020 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7021 * @param pCtx The CPU context.
7022 * @param pFpuCtx The FPU context.
7023 */
7024DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
7025{
7026 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7027 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7028 /** @todo x87.CS and FPUIP need to be kept separately. */
7029 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7030 {
7031 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7032 * happens in real mode here based on the fnsave and fnstenv images. */
7033 pFpuCtx->CS = 0;
7034 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
7035 }
7036 else
7037 {
7038 pFpuCtx->CS = pCtx->cs.Sel;
7039 pFpuCtx->FPUIP = pCtx->rip;
7040 }
7041}
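/* Example for the real/V86-mode branch above: with CS=0x1234 and EIP=0x0010
   the stored FPUIP becomes 0x12350, i.e. roughly the CS:IP linear address
   (the code ORs EIP with CS shifted left by 4 rather than adding), while the
   CS field itself is stored as zero. */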
7042
7043
7044/**
7045 * Updates the x87.DS and FPUDP registers.
7046 *
7047 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7048 * @param pCtx The CPU context.
7049 * @param pFpuCtx The FPU context.
7050 * @param iEffSeg The effective segment register.
7051 * @param GCPtrEff The effective address relative to @a iEffSeg.
7052 */
7053DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7054{
7055 RTSEL sel;
7056 switch (iEffSeg)
7057 {
7058 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
7059 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
7060 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
7061 case X86_SREG_ES: sel = pCtx->es.Sel; break;
7062 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
7063 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
7064 default:
7065 AssertMsgFailed(("%d\n", iEffSeg));
7066 sel = pCtx->ds.Sel;
7067 }
7068 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7069 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7070 {
7071 pFpuCtx->DS = 0;
7072 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7073 }
7074 else
7075 {
7076 pFpuCtx->DS = sel;
7077 pFpuCtx->FPUDP = GCPtrEff;
7078 }
7079}
7080
7081
7082/**
7083 * Rotates the stack registers in the push direction.
7084 *
7085 * @param pFpuCtx The FPU context.
7086 * @remarks This is a complete waste of time, but fxsave stores the registers in
7087 * stack order.
7088 */
7089DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7090{
7091 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7092 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7093 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7094 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7095 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7096 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7097 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7098 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7099 pFpuCtx->aRegs[0].r80 = r80Tmp;
7100}
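/* aRegs[] is kept in ST() order (aRegs[i] holds ST(i)), so once TOP has been
   decremented the rotation above shifts every register one slot up: the value
   the caller just placed in aRegs[7] ends up in aRegs[0] as the new ST(0) and
   the old ST(0) becomes ST(1).  iemFpuRotateStackPop below is the inverse. */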
7101
7102
7103/**
7104 * Rotates the stack registers in the pop direction.
7105 *
7106 * @param pFpuCtx The FPU context.
7107 * @remarks This is a complete waste of time, but fxsave stores the registers in
7108 * stack order.
7109 */
7110DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7111{
7112 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7113 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7114 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7115 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7116 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7117 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7118 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7119 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7120 pFpuCtx->aRegs[7].r80 = r80Tmp;
7121}
7122
7123
7124/**
7125 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7126 * exception prevents it.
7127 *
7128 * @param pResult The FPU operation result to push.
7129 * @param pFpuCtx The FPU context.
7130 */
7131IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7132{
7133 /* Update FSW and bail if there are pending exceptions afterwards. */
7134 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7135 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7136 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7137 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7138 {
7139 pFpuCtx->FSW = fFsw;
7140 return;
7141 }
7142
7143 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7144 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7145 {
7146 /* All is fine, push the actual value. */
7147 pFpuCtx->FTW |= RT_BIT(iNewTop);
7148 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7149 }
7150 else if (pFpuCtx->FCW & X86_FCW_IM)
7151 {
7152 /* Masked stack overflow, push QNaN. */
7153 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7154 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7155 }
7156 else
7157 {
7158 /* Raise stack overflow, don't push anything. */
7159 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7160 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7161 return;
7162 }
7163
7164 fFsw &= ~X86_FSW_TOP_MASK;
7165 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7166 pFpuCtx->FSW = fFsw;
7167
7168 iemFpuRotateStackPush(pFpuCtx);
7169}
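/* A note on the TOP arithmetic used above: the TOP field is three bits wide,
   so adding 7 modulo 8 is the same as subtracting 1, i.e. iNewTop is the
   top-of-stack slot after the push (a push decrements TOP, a pop increments
   it).  The FTW bit for that slot tells whether the push would overflow. */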
7170
7171
7172/**
7173 * Stores a result in a FPU register and updates the FSW and FTW.
7174 *
7175 * @param pFpuCtx The FPU context.
7176 * @param pResult The result to store.
7177 * @param iStReg Which FPU register to store it in.
7178 */
7179IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7180{
7181 Assert(iStReg < 8);
7182 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7183 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7184 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7185 pFpuCtx->FTW |= RT_BIT(iReg);
7186 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7187}
7188
7189
7190/**
7191 * Only updates the FPU status word (FSW) with the result of the current
7192 * instruction.
7193 *
7194 * @param pFpuCtx The FPU context.
7195 * @param u16FSW The FSW output of the current instruction.
7196 */
7197IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7198{
7199 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7200 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7201}
7202
7203
7204/**
7205 * Pops one item off the FPU stack if no pending exception prevents it.
7206 *
7207 * @param pFpuCtx The FPU context.
7208 */
7209IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7210{
7211 /* Check pending exceptions. */
7212 uint16_t uFSW = pFpuCtx->FSW;
7213 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7214 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7215 return;
7216
7217 /* TOP++ - popping increments TOP; adding 9 is +1 modulo 8 in the 3-bit field. */
7218 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7219 uFSW &= ~X86_FSW_TOP_MASK;
7220 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7221 pFpuCtx->FSW = uFSW;
7222
7223 /* Mark the previous ST0 as empty. */
7224 iOldTop >>= X86_FSW_TOP_SHIFT;
7225 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7226
7227 /* Rotate the registers. */
7228 iemFpuRotateStackPop(pFpuCtx);
7229}
7230
7231
7232/**
7233 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7234 *
7235 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7236 * @param pResult The FPU operation result to push.
7237 */
7238IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7239{
7240 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7241 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7242 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7243 iemFpuMaybePushResult(pResult, pFpuCtx);
7244}
7245
7246
7247/**
7248 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7249 * and sets FPUDP and FPUDS.
7250 *
7251 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7252 * @param pResult The FPU operation result to push.
7253 * @param iEffSeg The effective segment register.
7254 * @param GCPtrEff The effective address relative to @a iEffSeg.
7255 */
7256IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7257{
7258 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7259 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7260 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7261 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7262 iemFpuMaybePushResult(pResult, pFpuCtx);
7263}
7264
7265
7266/**
7267 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7268 * unless a pending exception prevents it.
7269 *
7270 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7271 * @param pResult The FPU operation result to store and push.
7272 */
7273IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7274{
7275 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7276 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7277 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7278
7279 /* Update FSW and bail if there are pending exceptions afterwards. */
7280 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7281 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7282 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7283 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7284 {
7285 pFpuCtx->FSW = fFsw;
7286 return;
7287 }
7288
7289 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7290 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7291 {
7292 /* All is fine, push the actual value. */
7293 pFpuCtx->FTW |= RT_BIT(iNewTop);
7294 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7295 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7296 }
7297 else if (pFpuCtx->FCW & X86_FCW_IM)
7298 {
7299 /* Masked stack overflow, push QNaN. */
7300 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7301 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7302 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7303 }
7304 else
7305 {
7306 /* Raise stack overflow, don't push anything. */
7307 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7308 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7309 return;
7310 }
7311
7312 fFsw &= ~X86_FSW_TOP_MASK;
7313 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7314 pFpuCtx->FSW = fFsw;
7315
7316 iemFpuRotateStackPush(pFpuCtx);
7317}
7318
7319
7320/**
7321 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7322 * FOP.
7323 *
7324 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7325 * @param pResult The result to store.
7326 * @param iStReg Which FPU register to store it in.
7327 */
7328IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7329{
7330 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7331 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7332 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7333 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7334}
7335
7336
7337/**
7338 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7339 * FOP, and then pops the stack.
7340 *
7341 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7342 * @param pResult The result to store.
7343 * @param iStReg Which FPU register to store it in.
7344 */
7345IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7346{
7347 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7348 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7349 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7350 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7351 iemFpuMaybePopOne(pFpuCtx);
7352}
7353
7354
7355/**
7356 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7357 * FPUDP, and FPUDS.
7358 *
7359 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7360 * @param pResult The result to store.
7361 * @param iStReg Which FPU register to store it in.
7362 * @param iEffSeg The effective memory operand selector register.
7363 * @param GCPtrEff The effective memory operand offset.
7364 */
7365IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7366 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7367{
7368 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7369 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7370 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7371 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7372 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7373}
7374
7375
7376/**
7377 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7378 * FPUDP, and FPUDS, and then pops the stack.
7379 *
7380 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7381 * @param pResult The result to store.
7382 * @param iStReg Which FPU register to store it in.
7383 * @param iEffSeg The effective memory operand selector register.
7384 * @param GCPtrEff The effective memory operand offset.
7385 */
7386IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7387 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7388{
7389 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7390 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7391 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7392 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7393 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7394 iemFpuMaybePopOne(pFpuCtx);
7395}
7396
7397
7398/**
7399 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7400 *
7401 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7402 */
7403IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7404{
7405 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7406 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7407 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7408}
7409
7410
7411/**
7412 * Marks the specified stack register as free (for FFREE).
7413 *
7414 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7415 * @param iStReg The register to free.
7416 */
7417IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7418{
7419 Assert(iStReg < 8);
7420 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7421 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7422 pFpuCtx->FTW &= ~RT_BIT(iReg);
7423}
7424
7425
7426/**
7427 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7428 *
7429 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7430 */
7431IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7432{
7433 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7434 uint16_t uFsw = pFpuCtx->FSW;
7435 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7436 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7437 uFsw &= ~X86_FSW_TOP_MASK;
7438 uFsw |= uTop;
7439 pFpuCtx->FSW = uFsw;
7440}
7441
7442
7443/**
7444 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7445 *
7446 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7447 */
7448IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7449{
7450 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7451 uint16_t uFsw = pFpuCtx->FSW;
7452 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7453 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7454 uFsw &= ~X86_FSW_TOP_MASK;
7455 uFsw |= uTop;
7456 pFpuCtx->FSW = uFsw;
7457}
7458
7459
7460/**
7461 * Updates the FSW, FOP, FPUIP, and FPUCS.
7462 *
7463 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7464 * @param u16FSW The FSW from the current instruction.
7465 */
7466IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7467{
7468 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7469 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7470 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7471 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7472}
7473
7474
7475/**
7476 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7477 *
7478 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7479 * @param u16FSW The FSW from the current instruction.
7480 */
7481IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7482{
7483 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7484 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7485 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7486 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7487 iemFpuMaybePopOne(pFpuCtx);
7488}
7489
7490
7491/**
7492 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7493 *
7494 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7495 * @param u16FSW The FSW from the current instruction.
7496 * @param iEffSeg The effective memory operand selector register.
7497 * @param GCPtrEff The effective memory operand offset.
7498 */
7499IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7500{
7501 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7502 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7503 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7504 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7505 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7506}
7507
7508
7509/**
7510 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7511 *
7512 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7513 * @param u16FSW The FSW from the current instruction.
7514 */
7515IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7516{
7517 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7518 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7519 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7520 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7521 iemFpuMaybePopOne(pFpuCtx);
7522 iemFpuMaybePopOne(pFpuCtx);
7523}
7524
7525
7526/**
7527 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7528 *
7529 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7530 * @param u16FSW The FSW from the current instruction.
7531 * @param iEffSeg The effective memory operand selector register.
7532 * @param GCPtrEff The effective memory operand offset.
7533 */
7534IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7535{
7536 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7537 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7538 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7539 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7540 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7541 iemFpuMaybePopOne(pFpuCtx);
7542}
7543
7544
7545/**
7546 * Worker routine for raising an FPU stack underflow exception.
7547 *
7548 * @param pFpuCtx The FPU context.
7549 * @param iStReg The stack register being accessed.
7550 */
7551IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7552{
7553 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7554 if (pFpuCtx->FCW & X86_FCW_IM)
7555 {
7556 /* Masked underflow. */
7557 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7558 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7559 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7560 if (iStReg != UINT8_MAX)
7561 {
7562 pFpuCtx->FTW |= RT_BIT(iReg);
7563 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7564 }
7565 }
7566 else
7567 {
7568 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7569 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7570 }
7571}
7572
7573
7574/**
7575 * Raises a FPU stack underflow exception.
7576 *
7577 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7578 * @param iStReg The destination register that should be loaded
7579 * with QNaN if \#IS is not masked. Specify
7580 * UINT8_MAX if none (like for fcom).
7581 */
7582DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7583{
7584 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7585 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7586 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7587 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7588}
7589
7590
7591DECL_NO_INLINE(IEM_STATIC, void)
7592iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7593{
7594 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7595 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7596 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7597 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7598 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7599}
7600
7601
7602DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7603{
7604 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7605 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7606 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7607 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7608 iemFpuMaybePopOne(pFpuCtx);
7609}
7610
7611
7612DECL_NO_INLINE(IEM_STATIC, void)
7613iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7614{
7615 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7616 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7617 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7618 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7619 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7620 iemFpuMaybePopOne(pFpuCtx);
7621}
7622
7623
7624DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7625{
7626 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7627 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7628 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7629 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7630 iemFpuMaybePopOne(pFpuCtx);
7631 iemFpuMaybePopOne(pFpuCtx);
7632}
7633
7634
7635DECL_NO_INLINE(IEM_STATIC, void)
7636iemFpuStackPushUnderflow(PVMCPU pVCpu)
7637{
7638 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7639 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7640 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7641
7642 if (pFpuCtx->FCW & X86_FCW_IM)
7643 {
7644 /* Masked underflow - Push QNaN. */
7645 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7646 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7647 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7648 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7649 pFpuCtx->FTW |= RT_BIT(iNewTop);
7650 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7651 iemFpuRotateStackPush(pFpuCtx);
7652 }
7653 else
7654 {
7655 /* Exception pending - don't change TOP or the register stack. */
7656 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7657 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7658 }
7659}
7660
7661
7662DECL_NO_INLINE(IEM_STATIC, void)
7663iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7664{
7665 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7666 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7667 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7668
7669 if (pFpuCtx->FCW & X86_FCW_IM)
7670 {
7671 /* Masked underflow - Push QNaN. */
7672 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7673 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7674 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7675 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7676 pFpuCtx->FTW |= RT_BIT(iNewTop);
7677 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7678 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7679 iemFpuRotateStackPush(pFpuCtx);
7680 }
7681 else
7682 {
7683 /* Exception pending - don't change TOP or the register stack. */
7684 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7685 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7686 }
7687}
7688
7689
7690/**
7691 * Worker routine for raising an FPU stack overflow exception on a push.
7692 *
7693 * @param pFpuCtx The FPU context.
7694 */
7695IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7696{
7697 if (pFpuCtx->FCW & X86_FCW_IM)
7698 {
7699 /* Masked overflow. */
7700 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7701 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7702 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7703 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7704 pFpuCtx->FTW |= RT_BIT(iNewTop);
7705 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7706 iemFpuRotateStackPush(pFpuCtx);
7707 }
7708 else
7709 {
7710 /* Exception pending - don't change TOP or the register stack. */
7711 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7712 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7713 }
7714}
7715
7716
7717/**
7718 * Raises a FPU stack overflow exception on a push.
7719 *
7720 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7721 */
7722DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7723{
7724 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7725 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7726 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7727 iemFpuStackPushOverflowOnly(pFpuCtx);
7728}
7729
7730
7731/**
7732 * Raises a FPU stack overflow exception on a push with a memory operand.
7733 *
7734 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7735 * @param iEffSeg The effective memory operand selector register.
7736 * @param GCPtrEff The effective memory operand offset.
7737 */
7738DECL_NO_INLINE(IEM_STATIC, void)
7739iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7740{
7741 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7742 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7743 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7744 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7745 iemFpuStackPushOverflowOnly(pFpuCtx);
7746}
7747
7748
7749IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7750{
7751 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7752 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7753 if (pFpuCtx->FTW & RT_BIT(iReg))
7754 return VINF_SUCCESS;
7755 return VERR_NOT_FOUND;
7756}
7757
7758
7759IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7760{
7761 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7762 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7763 if (pFpuCtx->FTW & RT_BIT(iReg))
7764 {
7765 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7766 return VINF_SUCCESS;
7767 }
7768 return VERR_NOT_FOUND;
7769}
7770
7771
7772IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7773 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7774{
7775 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7776 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7777 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7778 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7779 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7780 {
7781 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7782 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7783 return VINF_SUCCESS;
7784 }
7785 return VERR_NOT_FOUND;
7786}
7787
7788
7789IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7790{
7791 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7792 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7793 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7794 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7795 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7796 {
7797 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7798 return VINF_SUCCESS;
7799 }
7800 return VERR_NOT_FOUND;
7801}
7802
7803
7804/**
7805 * Updates the FPU exception status after FCW is changed.
7806 *
7807 * @param pFpuCtx The FPU context.
7808 */
7809IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7810{
7811 uint16_t u16Fsw = pFpuCtx->FSW;
7812 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7813 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7814 else
7815 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7816 pFpuCtx->FSW = u16Fsw;
7817}
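/* In other words: ES is the summary bit, set whenever at least one exception
   flag in FSW is not masked by the corresponding FCW bit, and on later FPUs
   the B bit simply mirrors ES (on the original 8087 it was a real busy flag). */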
7818
7819
7820/**
7821 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7822 *
7823 * @returns The full FTW.
7824 * @param pFpuCtx The FPU context.
7825 */
7826IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7827{
7828 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7829 uint16_t u16Ftw = 0;
7830 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7831 for (unsigned iSt = 0; iSt < 8; iSt++)
7832 {
7833 unsigned const iReg = (iSt + iTop) & 7;
7834 if (!(u8Ftw & RT_BIT(iReg)))
7835 u16Ftw |= 3 << (iReg * 2); /* empty */
7836 else
7837 {
7838 uint16_t uTag;
7839 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7840 if (pr80Reg->s.uExponent == 0x7fff)
7841 uTag = 2; /* Exponent is all 1's => Special. */
7842 else if (pr80Reg->s.uExponent == 0x0000)
7843 {
7844 if (pr80Reg->s.u64Mantissa == 0x0000)
7845 uTag = 1; /* All bits are zero => Zero. */
7846 else
7847 uTag = 2; /* Must be special. */
7848 }
7849 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7850 uTag = 0; /* Valid. */
7851 else
7852 uTag = 2; /* Must be special. */
7853
7854 u16Ftw |= uTag << (iReg * 2);
7855 }
7856 }
7857
7858 return u16Ftw;
7859}
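/* Tag encoding produced above (two bits per physical register, matching the
   FNSTENV/FNSAVE image): 00 = valid, 01 = zero, 10 = special (NaN, infinity,
   denormal or unnormal), 11 = empty.  Example: an entirely empty stack yields
   0xffff, while a stack holding only +0.0 in ST(0) with TOP=7 yields 0x7fff
   (physical register 7 tagged 01, everything else 11). */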
7860
7861
7862/**
7863 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7864 *
7865 * @returns The compressed FTW.
7866 * @param u16FullFtw The full FTW to convert.
7867 */
7868IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7869{
7870 uint8_t u8Ftw = 0;
7871 for (unsigned i = 0; i < 8; i++)
7872 {
7873 if ((u16FullFtw & 3) != 3 /*empty*/)
7874 u8Ftw |= RT_BIT(i);
7875 u16FullFtw >>= 2;
7876 }
7877
7878 return u8Ftw;
7879}
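/* Example for the compression above: a full FTW of 0x7fff (physical register 7
   tagged "zero", all others empty) compresses to 0x80: only bit 7 is set,
   because the internal FTW kept by IEM only tracks empty vs. non-empty and the
   exact tag is recalculated from the register contents when needed. */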
7880
7881/** @} */
7882
7883
7884/** @name Memory access.
7885 *
7886 * @{
7887 */
7888
7889
7890/**
7891 * Updates the IEMCPU::cbWritten counter if applicable.
7892 *
7893 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7894 * @param fAccess The access being accounted for.
7895 * @param cbMem The access size.
7896 */
7897DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7898{
7899 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7900 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7901 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7902}
7903
7904
7905/**
7906 * Checks if the given segment can be written to, raise the appropriate
7907 * exception if not.
7908 *
7909 * @returns VBox strict status code.
7910 *
7911 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7912 * @param pHid Pointer to the hidden register.
7913 * @param iSegReg The register number.
7914 * @param pu64BaseAddr Where to return the base address to use for the
7915 * segment. (In 64-bit code it may differ from the
7916 * base in the hidden segment.)
7917 */
7918IEM_STATIC VBOXSTRICTRC
7919iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7920{
7921 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7922 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7923 else
7924 {
7925 if (!pHid->Attr.n.u1Present)
7926 {
7927 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7928 AssertRelease(uSel == 0);
7929 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7930 return iemRaiseGeneralProtectionFault0(pVCpu);
7931 }
7932
7933 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7934 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7935 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7936 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7937 *pu64BaseAddr = pHid->u64Base;
7938 }
7939 return VINF_SUCCESS;
7940}
7941
7942
7943/**
7944 * Checks if the given segment can be read from, raising the appropriate
7945 * exception if not.
7946 *
7947 * @returns VBox strict status code.
7948 *
7949 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7950 * @param pHid Pointer to the hidden register.
7951 * @param iSegReg The register number.
7952 * @param pu64BaseAddr Where to return the base address to use for the
7953 * segment. (In 64-bit code it may differ from the
7954 * base in the hidden segment.)
7955 */
7956IEM_STATIC VBOXSTRICTRC
7957iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7958{
7959 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7960 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7961 else
7962 {
7963 if (!pHid->Attr.n.u1Present)
7964 {
7965 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7966 AssertRelease(uSel == 0);
7967 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7968 return iemRaiseGeneralProtectionFault0(pVCpu);
7969 }
7970
7971 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7972 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7973 *pu64BaseAddr = pHid->u64Base;
7974 }
7975 return VINF_SUCCESS;
7976}
7977
7978
7979/**
7980 * Applies the segment limit, base and attributes.
7981 *
7982 * This may raise a \#GP or \#SS.
7983 *
7984 * @returns VBox strict status code.
7985 *
7986 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7987 * @param fAccess The kind of access which is being performed.
7988 * @param iSegReg The index of the segment register to apply.
7989 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7990 * TSS, ++).
7991 * @param cbMem The access size.
7992 * @param pGCPtrMem Pointer to the guest memory address to apply
7993 * segmentation to. Input and output parameter.
7994 */
7995IEM_STATIC VBOXSTRICTRC
7996iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7997{
7998 if (iSegReg == UINT8_MAX)
7999 return VINF_SUCCESS;
8000
8001 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8002 switch (pVCpu->iem.s.enmCpuMode)
8003 {
8004 case IEMMODE_16BIT:
8005 case IEMMODE_32BIT:
8006 {
8007 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8008 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8009
8010 if ( pSel->Attr.n.u1Present
8011 && !pSel->Attr.n.u1Unusable)
8012 {
8013 Assert(pSel->Attr.n.u1DescType);
8014 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8015 {
8016 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8017 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8018 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8019
8020 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8021 {
8022 /** @todo CPL check. */
8023 }
8024
8025 /*
8026 * There are two kinds of data selectors, normal and expand down.
8027 */
8028 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8029 {
8030 if ( GCPtrFirst32 > pSel->u32Limit
8031 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8032 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8033 }
8034 else
8035 {
8036 /*
8037 * The upper boundary is defined by the B bit, not the G bit!
8038 */
8039 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8040 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8041 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8042 }
8043 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8044 }
8045 else
8046 {
8047
8048 /*
8049                  * Code selectors can usually be used to read through; writing is
8050 * only permitted in real and V8086 mode.
8051 */
8052 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8053 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8054 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8055 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8056 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8057
8058 if ( GCPtrFirst32 > pSel->u32Limit
8059 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8060 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8061
8062 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8063 {
8064 /** @todo CPL check. */
8065 }
8066
8067 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8068 }
8069 }
8070 else
8071 return iemRaiseGeneralProtectionFault0(pVCpu);
8072 return VINF_SUCCESS;
8073 }
8074
8075 case IEMMODE_64BIT:
8076 {
8077 RTGCPTR GCPtrMem = *pGCPtrMem;
8078 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8079 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8080
8081 Assert(cbMem >= 1);
8082 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8083 return VINF_SUCCESS;
8084 return iemRaiseGeneralProtectionFault0(pVCpu);
8085 }
8086
8087 default:
8088 AssertFailedReturn(VERR_IEM_IPE_7);
8089 }
8090}
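/*
 * Illustration (not part of the build): the expand-down bounds rule applied
 * above - valid offsets lie strictly above the limit and at or below the
 * upper bound selected by the B/D bit.  The helper name is made up for this
 * example and the last offset is assumed not to wrap.
 */
#if 0 /* sketch only, not built */
static bool iemMemIsWithinExpandDownLimitExample(uint32_t offFirst, uint32_t offLast, uint32_t uLimit, bool fDefBig)
{
    uint32_t const uUpperBound = fDefBig ? UINT32_MAX : UINT32_C(0xffff);
    return offFirst > uLimit && offLast <= uUpperBound;
}
#endif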
8091
8092
8093/**
8094 * Translates a virtual address to a physical address and checks if we
8095 * can access the page as specified.
8096 *
8097 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8098 * @param GCPtrMem The virtual address.
8099 * @param fAccess The intended access.
8100 * @param pGCPhysMem Where to return the physical address.
8101 */
8102IEM_STATIC VBOXSTRICTRC
8103iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8104{
8105 /** @todo Need a different PGM interface here. We're currently using
8106  *        generic / REM interfaces. This won't cut it for R0 & RC. */
8107 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8108 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8109 RTGCPHYS GCPhys;
8110 uint64_t fFlags;
8111 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8112 if (RT_FAILURE(rc))
8113 {
8114 /** @todo Check unassigned memory in unpaged mode. */
8115 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8116 *pGCPhysMem = NIL_RTGCPHYS;
8117 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8118 }
8119
8120     /* If the page is writable, user accessible and does not have the no-exec
8121        bit set, all access is allowed.  Otherwise we'll have to check more carefully... */
8122 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8123 {
8124 /* Write to read only memory? */
8125 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8126 && !(fFlags & X86_PTE_RW)
8127 && ( (pVCpu->iem.s.uCpl == 3
8128 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8129 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
8130 {
8131 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8132 *pGCPhysMem = NIL_RTGCPHYS;
8133 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8134 }
8135
8136 /* Kernel memory accessed by userland? */
8137 if ( !(fFlags & X86_PTE_US)
8138 && pVCpu->iem.s.uCpl == 3
8139 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8140 {
8141 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8142 *pGCPhysMem = NIL_RTGCPHYS;
8143 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8144 }
8145
8146 /* Executing non-executable memory? */
8147 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8148 && (fFlags & X86_PTE_PAE_NX)
8149 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
8150 {
8151 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8152 *pGCPhysMem = NIL_RTGCPHYS;
8153 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8154 VERR_ACCESS_DENIED);
8155 }
8156 }
8157
8158 /*
8159 * Set the dirty / access flags.
8160      * ASSUMES this is set when the address is translated rather than on commit...
8161 */
8162 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8163 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8164 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8165 {
8166 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8167 AssertRC(rc2);
8168 }
8169
8170 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8171 *pGCPhysMem = GCPhys;
8172 return VINF_SUCCESS;
8173}
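/*
 * Illustration (not part of the build): the write permission check above,
 * expressed as a standalone predicate - a write faults when the PTE is
 * read-only and either the access comes from CPL 3 (and isn't a system
 * access) or CR0.WP is set.  The helper name is made up for this example.
 */
#if 0 /* sketch only, not built */
static bool iemMemIsWriteProtectedExample(uint64_t fPteFlags, uint32_t fAccess, uint8_t uCpl, bool fCr0Wp)
{
    return (fAccess & IEM_ACCESS_TYPE_WRITE)
        && !(fPteFlags & X86_PTE_RW)
        && (   (uCpl == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
            || fCr0Wp);
}
#endif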
8174
8175
8176
8177/**
8178 * Maps a physical page.
8179 *
8180 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8182 * @param GCPhysMem The physical address.
8183 * @param fAccess The intended access.
8184 * @param ppvMem Where to return the mapping address.
8185 * @param pLock The PGM lock.
8186 */
8187IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8188{
8189#ifdef IEM_VERIFICATION_MODE_FULL
8190 /* Force the alternative path so we can ignore writes. */
8191 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
8192 {
8193 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8194 {
8195 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
8196 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8197 if (RT_FAILURE(rc2))
8198 pVCpu->iem.s.fProblematicMemory = true;
8199 }
8200 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8201 }
8202#endif
8203#ifdef IEM_LOG_MEMORY_WRITES
8204 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8205 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8206#endif
8207#ifdef IEM_VERIFICATION_MODE_MINIMAL
8208 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8209#endif
8210
8211 /** @todo This API may require some improving later. A private deal with PGM
8212  *        regarding locking and unlocking needs to be struck.  A couple of TLBs
8213 * living in PGM, but with publicly accessible inlined access methods
8214 * could perhaps be an even better solution. */
8215 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8216 GCPhysMem,
8217 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8218 pVCpu->iem.s.fBypassHandlers,
8219 ppvMem,
8220 pLock);
8221 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8222 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8223
8224#ifdef IEM_VERIFICATION_MODE_FULL
8225 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8226 pVCpu->iem.s.fProblematicMemory = true;
8227#endif
8228 return rc;
8229}
8230
8231
8232/**
8233  * Unmaps a page previously mapped by iemMemPageMap.
8234 *
8235 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8236 * @param GCPhysMem The physical address.
8237 * @param fAccess The intended access.
8238 * @param pvMem What iemMemPageMap returned.
8239 * @param pLock The PGM lock.
8240 */
8241DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8242{
8243 NOREF(pVCpu);
8244 NOREF(GCPhysMem);
8245 NOREF(fAccess);
8246 NOREF(pvMem);
8247 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8248}
8249
8250
8251/**
8252 * Looks up a memory mapping entry.
8253 *
8254 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8255 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8256 * @param pvMem The memory address.
8257  * @param   fAccess             The access type to look for.
8258 */
8259DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8260{
8261 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8262 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8263 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8264 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8265 return 0;
8266 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8267 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8268 return 1;
8269 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8270 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8271 return 2;
8272 return VERR_NOT_FOUND;
8273}
8274
8275
8276/**
8277 * Finds a free memmap entry when using iNextMapping doesn't work.
8278 *
8279 * @returns Memory mapping index, 1024 on failure.
8280 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8281 */
8282IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8283{
8284 /*
8285 * The easy case.
8286 */
8287 if (pVCpu->iem.s.cActiveMappings == 0)
8288 {
8289 pVCpu->iem.s.iNextMapping = 1;
8290 return 0;
8291 }
8292
8293 /* There should be enough mappings for all instructions. */
8294 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8295
8296 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8297 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8298 return i;
8299
8300 AssertFailedReturn(1024);
8301}
8302
8303
8304/**
8305 * Commits a bounce buffer that needs writing back and unmaps it.
8306 *
8307 * @returns Strict VBox status code.
8308 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8309 * @param iMemMap The index of the buffer to commit.
8310  * @param   fPostponeFail       Whether we can postpone write failures to ring-3.
8311 * Always false in ring-3, obviously.
8312 */
8313IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8314{
8315 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8316 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8317#ifdef IN_RING3
8318 Assert(!fPostponeFail);
8319 RT_NOREF_PV(fPostponeFail);
8320#endif
8321
8322 /*
8323 * Do the writing.
8324 */
8325#ifndef IEM_VERIFICATION_MODE_MINIMAL
8326 PVM pVM = pVCpu->CTX_SUFF(pVM);
8327 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
8328 && !IEM_VERIFICATION_ENABLED(pVCpu))
8329 {
8330 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8331 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8332 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8333 if (!pVCpu->iem.s.fBypassHandlers)
8334 {
8335 /*
8336 * Carefully and efficiently dealing with access handler return
8337              * codes makes this a little bloated.
8338 */
8339 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8340 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8341 pbBuf,
8342 cbFirst,
8343 PGMACCESSORIGIN_IEM);
8344 if (rcStrict == VINF_SUCCESS)
8345 {
8346 if (cbSecond)
8347 {
8348 rcStrict = PGMPhysWrite(pVM,
8349 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8350 pbBuf + cbFirst,
8351 cbSecond,
8352 PGMACCESSORIGIN_IEM);
8353 if (rcStrict == VINF_SUCCESS)
8354 { /* nothing */ }
8355 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8356 {
8357 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8358 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8359 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8360 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8361 }
8362# ifndef IN_RING3
8363 else if (fPostponeFail)
8364 {
8365 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8366 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8367 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8368 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8369 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8370 return iemSetPassUpStatus(pVCpu, rcStrict);
8371 }
8372# endif
8373 else
8374 {
8375 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8376 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8377 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8378 return rcStrict;
8379 }
8380 }
8381 }
8382 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8383 {
8384 if (!cbSecond)
8385 {
8386 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8387 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8388 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8389 }
8390 else
8391 {
8392 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8393 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8394 pbBuf + cbFirst,
8395 cbSecond,
8396 PGMACCESSORIGIN_IEM);
8397 if (rcStrict2 == VINF_SUCCESS)
8398 {
8399 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8400 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8401 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8402 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8403 }
8404 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8405 {
8406 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8407 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8408 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8409 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8410 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8411 }
8412# ifndef IN_RING3
8413 else if (fPostponeFail)
8414 {
8415 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8416 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8417 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8418 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8419 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8420 return iemSetPassUpStatus(pVCpu, rcStrict);
8421 }
8422# endif
8423 else
8424 {
8425 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8426 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8427 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8428 return rcStrict2;
8429 }
8430 }
8431 }
8432# ifndef IN_RING3
8433 else if (fPostponeFail)
8434 {
8435 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8436 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8437 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8438 if (!cbSecond)
8439 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8440 else
8441 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8442 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8443 return iemSetPassUpStatus(pVCpu, rcStrict);
8444 }
8445# endif
8446 else
8447 {
8448 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8449 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8450 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8451 return rcStrict;
8452 }
8453 }
8454 else
8455 {
8456 /*
8457 * No access handlers, much simpler.
8458 */
8459 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8460 if (RT_SUCCESS(rc))
8461 {
8462 if (cbSecond)
8463 {
8464 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8465 if (RT_SUCCESS(rc))
8466 { /* likely */ }
8467 else
8468 {
8469 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8470 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8471 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8472 return rc;
8473 }
8474 }
8475 }
8476 else
8477 {
8478 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8479 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8480 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8481 return rc;
8482 }
8483 }
8484 }
8485#endif
8486
8487#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8488 /*
8489 * Record the write(s).
8490 */
8491 if (!pVCpu->iem.s.fNoRem)
8492 {
8493 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8494 if (pEvtRec)
8495 {
8496 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8497 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
8498 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8499 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
8500 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
8501 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8502 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8503 }
8504 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8505 {
8506 pEvtRec = iemVerifyAllocRecord(pVCpu);
8507 if (pEvtRec)
8508 {
8509 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8510 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
8511 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8512 memcpy(pEvtRec->u.RamWrite.ab,
8513 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
8514 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
8515 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8516 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8517 }
8518 }
8519 }
8520#endif
8521#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
8522 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8523 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8524 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8525 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8526 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8527 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8528
8529 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8530 g_cbIemWrote = cbWrote;
8531 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8532#endif
8533
8534 /*
8535 * Free the mapping entry.
8536 */
8537 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8538 Assert(pVCpu->iem.s.cActiveMappings != 0);
8539 pVCpu->iem.s.cActiveMappings--;
8540 return VINF_SUCCESS;
8541}
8542
8543
8544/**
8545 * iemMemMap worker that deals with a request crossing pages.
8546 */
8547IEM_STATIC VBOXSTRICTRC
8548iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8549{
8550 /*
8551 * Do the address translations.
8552 */
8553 RTGCPHYS GCPhysFirst;
8554 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8555 if (rcStrict != VINF_SUCCESS)
8556 return rcStrict;
8557
8558 RTGCPHYS GCPhysSecond;
8559 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8560 fAccess, &GCPhysSecond);
8561 if (rcStrict != VINF_SUCCESS)
8562 return rcStrict;
8563 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8564
8565 PVM pVM = pVCpu->CTX_SUFF(pVM);
8566#ifdef IEM_VERIFICATION_MODE_FULL
8567 /*
8568 * Detect problematic memory when verifying so we can select
8569 * the right execution engine. (TLB: Redo this.)
8570 */
8571 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8572 {
8573 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8574 if (RT_SUCCESS(rc2))
8575 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8576 if (RT_FAILURE(rc2))
8577 pVCpu->iem.s.fProblematicMemory = true;
8578 }
8579#endif
8580
8581
8582 /*
8583 * Read in the current memory content if it's a read, execute or partial
8584 * write access.
8585 */
8586 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8587 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8588 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8589
8590 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8591 {
8592 if (!pVCpu->iem.s.fBypassHandlers)
8593 {
8594 /*
8595 * Must carefully deal with access handler status codes here,
8596              * which makes the code a bit bloated.
8597 */
8598 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8599 if (rcStrict == VINF_SUCCESS)
8600 {
8601 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8602 if (rcStrict == VINF_SUCCESS)
8603 { /*likely */ }
8604 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8605 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8606 else
8607 {
8608 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8609 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8610 return rcStrict;
8611 }
8612 }
8613 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8614 {
8615 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8616 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8617 {
8618 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8619 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8620 }
8621 else
8622 {
8623 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8624                          GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8625 return rcStrict2;
8626 }
8627 }
8628 else
8629 {
8630 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8631 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8632 return rcStrict;
8633 }
8634 }
8635 else
8636 {
8637 /*
8638              * No informational status codes here, much more straightforward.
8639 */
8640 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8641 if (RT_SUCCESS(rc))
8642 {
8643 Assert(rc == VINF_SUCCESS);
8644 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8645 if (RT_SUCCESS(rc))
8646 Assert(rc == VINF_SUCCESS);
8647 else
8648 {
8649 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8650 return rc;
8651 }
8652 }
8653 else
8654 {
8655 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8656 return rc;
8657 }
8658 }
8659
8660#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8661 if ( !pVCpu->iem.s.fNoRem
8662 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8663 {
8664 /*
8665 * Record the reads.
8666 */
8667 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8668 if (pEvtRec)
8669 {
8670 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8671 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8672 pEvtRec->u.RamRead.cb = cbFirstPage;
8673 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8674 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8675 }
8676 pEvtRec = iemVerifyAllocRecord(pVCpu);
8677 if (pEvtRec)
8678 {
8679 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8680 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8681 pEvtRec->u.RamRead.cb = cbSecondPage;
8682 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8683 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8684 }
8685 }
8686#endif
8687 }
8688#ifdef VBOX_STRICT
8689 else
8690 memset(pbBuf, 0xcc, cbMem);
8691 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8692 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8693#endif
8694
8695 /*
8696 * Commit the bounce buffer entry.
8697 */
8698 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8699 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8700 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8701 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8702 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8703 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8704 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8705 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8706 pVCpu->iem.s.cActiveMappings++;
8707
8708 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8709 *ppvMem = pbBuf;
8710 return VINF_SUCCESS;
8711}
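/*
 * Illustration (not part of the build): how the page-crossing request above is
 * split into the two chunks that end up in the bounce buffer.  The type and
 * function names are made up for this example.
 */
#if 0 /* sketch only, not built */
typedef struct IEMCROSSPAGESPLITEXAMPLE
{
    uint32_t cbFirstPage;   /* bytes from GCPtrFirst up to the end of its page */
    uint32_t cbSecondPage;  /* the remainder, starting at the next page */
} IEMCROSSPAGESPLITEXAMPLE;

static IEMCROSSPAGESPLITEXAMPLE iemMemSplitCrossPageExample(RTGCPTR GCPtrFirst, size_t cbMem)
{
    IEMCROSSPAGESPLITEXAMPLE Split;
    Split.cbFirstPage  = PAGE_SIZE - (uint32_t)(GCPtrFirst & PAGE_OFFSET_MASK);
    Split.cbSecondPage = (uint32_t)(cbMem - Split.cbFirstPage);
    return Split;
}
#endif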
8712
8713
8714/**
8715  * iemMemMap worker that deals with iemMemPageMap failures.
8716 */
8717IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8718 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8719{
8720 /*
8721 * Filter out conditions we can handle and the ones which shouldn't happen.
8722      * Filter out the conditions we cannot handle here, plus the ones which shouldn't happen.
8723 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8724 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8725 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8726 {
8727 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8728 return rcMap;
8729 }
8730 pVCpu->iem.s.cPotentialExits++;
8731
8732 /*
8733 * Read in the current memory content if it's a read, execute or partial
8734 * write access.
8735 */
8736 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8737 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8738 {
8739 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8740 memset(pbBuf, 0xff, cbMem);
8741 else
8742 {
8743 int rc;
8744 if (!pVCpu->iem.s.fBypassHandlers)
8745 {
8746 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8747 if (rcStrict == VINF_SUCCESS)
8748 { /* nothing */ }
8749 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8750 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8751 else
8752 {
8753 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8754 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8755 return rcStrict;
8756 }
8757 }
8758 else
8759 {
8760 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8761 if (RT_SUCCESS(rc))
8762 { /* likely */ }
8763 else
8764 {
8765 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8766 GCPhysFirst, rc));
8767 return rc;
8768 }
8769 }
8770 }
8771
8772#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8773 if ( !pVCpu->iem.s.fNoRem
8774 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8775 {
8776 /*
8777 * Record the read.
8778 */
8779 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8780 if (pEvtRec)
8781 {
8782 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8783 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8784 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8785 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8786 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8787 }
8788 }
8789#endif
8790 }
8791#ifdef VBOX_STRICT
8792 else
8793 memset(pbBuf, 0xcc, cbMem);
8794#endif
8795#ifdef VBOX_STRICT
8796 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8797 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8798#endif
8799
8800 /*
8801 * Commit the bounce buffer entry.
8802 */
8803 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8804 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8805 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8806 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8807 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8808 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8809 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8810 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8811 pVCpu->iem.s.cActiveMappings++;
8812
8813 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8814 *ppvMem = pbBuf;
8815 return VINF_SUCCESS;
8816}
8817
8818
8819
8820/**
8821 * Maps the specified guest memory for the given kind of access.
8822 *
8823 * This may be using bounce buffering of the memory if it's crossing a page
8824 * boundary or if there is an access handler installed for any of it. Because
8825 * of lock prefix guarantees, we're in for some extra clutter when this
8826 * happens.
8827 *
8828 * This may raise a \#GP, \#SS, \#PF or \#AC.
8829 *
8830 * @returns VBox strict status code.
8831 *
8832 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8833 * @param ppvMem Where to return the pointer to the mapped
8834 * memory.
8835 * @param cbMem The number of bytes to map. This is usually 1,
8836 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8837 * string operations it can be up to a page.
8838 * @param iSegReg The index of the segment register to use for
8839 * this access. The base and limits are checked.
8840 * Use UINT8_MAX to indicate that no segmentation
8841 * is required (for IDT, GDT and LDT accesses).
8842 * @param GCPtrMem The address of the guest memory.
8843 * @param fAccess How the memory is being accessed. The
8844 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8845 * how to map the memory, while the
8846 * IEM_ACCESS_WHAT_XXX bit is used when raising
8847 * exceptions.
8848 */
8849IEM_STATIC VBOXSTRICTRC
8850iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8851{
8852 /*
8853 * Check the input and figure out which mapping entry to use.
8854 */
8855 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8856 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8857 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8858
8859 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8860 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8861 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8862 {
8863 iMemMap = iemMemMapFindFree(pVCpu);
8864 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8865 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8866 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8867 pVCpu->iem.s.aMemMappings[2].fAccess),
8868 VERR_IEM_IPE_9);
8869 }
8870
8871 /*
8872 * Map the memory, checking that we can actually access it. If something
8873 * slightly complicated happens, fall back on bounce buffering.
8874 */
8875 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8876 if (rcStrict != VINF_SUCCESS)
8877 return rcStrict;
8878
8879 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8880 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8881
8882 RTGCPHYS GCPhysFirst;
8883 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8884 if (rcStrict != VINF_SUCCESS)
8885 return rcStrict;
8886
8887 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8888 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8889 if (fAccess & IEM_ACCESS_TYPE_READ)
8890 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8891
8892 void *pvMem;
8893 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8894 if (rcStrict != VINF_SUCCESS)
8895 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8896
8897 /*
8898 * Fill in the mapping table entry.
8899 */
8900 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8901 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8902 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8903 pVCpu->iem.s.cActiveMappings++;
8904
8905 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8906 *ppvMem = pvMem;
8907 return VINF_SUCCESS;
8908}
8909
8910
8911/**
8912 * Commits the guest memory if bounce buffered and unmaps it.
8913 *
8914 * @returns Strict VBox status code.
8915 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8916 * @param pvMem The mapping.
8917 * @param fAccess The kind of access.
8918 */
8919IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8920{
8921 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8922 AssertReturn(iMemMap >= 0, iMemMap);
8923
8924 /* If it's bounce buffered, we may need to write back the buffer. */
8925 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8926 {
8927 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8928 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8929 }
8930 /* Otherwise unlock it. */
8931 else
8932 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8933
8934 /* Free the entry. */
8935 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8936 Assert(pVCpu->iem.s.cActiveMappings != 0);
8937 pVCpu->iem.s.cActiveMappings--;
8938 return VINF_SUCCESS;
8939}
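/*
 * Illustration (not part of the build): the expected pairing of iemMemMap and
 * iemMemCommitAndUnmap for a simple data write, mirroring the pattern used by
 * the fetch/store helpers further down.  The wrapper name is made up for this
 * example.
 */
#if 0 /* sketch only, not built */
IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16Example(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
{
    uint16_t *pu16Dst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    if (rc == VINF_SUCCESS)
    {
        *pu16Dst = u16Value;    /* the write lands in the mapped page or the bounce buffer */
        rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
    }
    return rc;
}
#endif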
8940
8941#ifdef IEM_WITH_SETJMP
8942
8943/**
8944 * Maps the specified guest memory for the given kind of access, longjmp on
8945 * error.
8946 *
8947 * This may be using bounce buffering of the memory if it's crossing a page
8948 * boundary or if there is an access handler installed for any of it. Because
8949 * of lock prefix guarantees, we're in for some extra clutter when this
8950 * happens.
8951 *
8952 * This may raise a \#GP, \#SS, \#PF or \#AC.
8953 *
8954 * @returns Pointer to the mapped memory.
8955 *
8956 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8957 * @param cbMem The number of bytes to map. This is usually 1,
8958 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8959 * string operations it can be up to a page.
8960 * @param iSegReg The index of the segment register to use for
8961 * this access. The base and limits are checked.
8962 * Use UINT8_MAX to indicate that no segmentation
8963 * is required (for IDT, GDT and LDT accesses).
8964 * @param GCPtrMem The address of the guest memory.
8965 * @param fAccess How the memory is being accessed. The
8966 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8967 * how to map the memory, while the
8968 * IEM_ACCESS_WHAT_XXX bit is used when raising
8969 * exceptions.
8970 */
8971IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8972{
8973 /*
8974 * Check the input and figure out which mapping entry to use.
8975 */
8976 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8977 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8978 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8979
8980 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8981 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8982 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8983 {
8984 iMemMap = iemMemMapFindFree(pVCpu);
8985 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8986 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8987 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8988 pVCpu->iem.s.aMemMappings[2].fAccess),
8989 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8990 }
8991
8992 /*
8993 * Map the memory, checking that we can actually access it. If something
8994 * slightly complicated happens, fall back on bounce buffering.
8995 */
8996 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8997 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8998 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8999
9000 /* Crossing a page boundary? */
9001 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
9002 { /* No (likely). */ }
9003 else
9004 {
9005 void *pvMem;
9006 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
9007 if (rcStrict == VINF_SUCCESS)
9008 return pvMem;
9009 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9010 }
9011
9012 RTGCPHYS GCPhysFirst;
9013 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
9014 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9015 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9016
9017 if (fAccess & IEM_ACCESS_TYPE_WRITE)
9018 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9019 if (fAccess & IEM_ACCESS_TYPE_READ)
9020 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9021
9022 void *pvMem;
9023 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9024 if (rcStrict == VINF_SUCCESS)
9025 { /* likely */ }
9026 else
9027 {
9028 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9029 if (rcStrict == VINF_SUCCESS)
9030 return pvMem;
9031 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9032 }
9033
9034 /*
9035 * Fill in the mapping table entry.
9036 */
9037 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9038 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9039 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9040 pVCpu->iem.s.cActiveMappings++;
9041
9042 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9043 return pvMem;
9044}
9045
9046
9047/**
9048 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9049 *
9050 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9051 * @param pvMem The mapping.
9052 * @param fAccess The kind of access.
9053 */
9054IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9055{
9056 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9057 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9058
9059 /* If it's bounce buffered, we may need to write back the buffer. */
9060 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9061 {
9062 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9063 {
9064 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9065 if (rcStrict == VINF_SUCCESS)
9066 return;
9067 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9068 }
9069 }
9070 /* Otherwise unlock it. */
9071 else
9072 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9073
9074 /* Free the entry. */
9075 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9076 Assert(pVCpu->iem.s.cActiveMappings != 0);
9077 pVCpu->iem.s.cActiveMappings--;
9078}
9079
9080#endif
9081
9082#ifndef IN_RING3
9083/**
9084 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9085 * buffer part shows trouble, the write is postponed to ring-3 (sets FF and stuff).
9086 *
9087 * Allows the instruction to be completed and retired, while the IEM user will
9088 * return to ring-3 immediately afterwards and do the postponed writes there.
9089 *
9090 * @returns VBox status code (no strict statuses). Caller must check
9091 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9093 * @param pvMem The mapping.
9094 * @param fAccess The kind of access.
9095 */
9096IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9097{
9098 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9099 AssertReturn(iMemMap >= 0, iMemMap);
9100
9101 /* If it's bounce buffered, we may need to write back the buffer. */
9102 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9103 {
9104 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9105 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9106 }
9107 /* Otherwise unlock it. */
9108 else
9109 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9110
9111 /* Free the entry. */
9112 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9113 Assert(pVCpu->iem.s.cActiveMappings != 0);
9114 pVCpu->iem.s.cActiveMappings--;
9115 return VINF_SUCCESS;
9116}
9117#endif
9118
9119
9120/**
9121 * Rolls back mappings, releasing page locks and such.
9122 *
9123 * The caller shall only call this after checking cActiveMappings.
9124 *
9126 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9127 */
9128IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9129{
9130 Assert(pVCpu->iem.s.cActiveMappings > 0);
9131
9132 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9133 while (iMemMap-- > 0)
9134 {
9135 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9136 if (fAccess != IEM_ACCESS_INVALID)
9137 {
9138 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9139 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9140 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9141 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9142 Assert(pVCpu->iem.s.cActiveMappings > 0);
9143 pVCpu->iem.s.cActiveMappings--;
9144 }
9145 }
9146}
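/*
 * Illustration (not part of the build): the documented calling pattern for
 * iemMemRollback - only invoke it after checking cActiveMappings, typically
 * when an instruction body bailed out with an error status.  The helper name
 * is made up for this example.
 */
#if 0 /* sketch only, not built */
static void iemMemRollbackOnErrorExample(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);
}
#endif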
9147
9148
9149/**
9150 * Fetches a data byte.
9151 *
9152 * @returns Strict VBox status code.
9153 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9154 * @param pu8Dst Where to return the byte.
9155 * @param iSegReg The index of the segment register to use for
9156 * this access. The base and limits are checked.
9157 * @param GCPtrMem The address of the guest memory.
9158 */
9159IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9160{
9161 /* The lazy approach for now... */
9162 uint8_t const *pu8Src;
9163 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9164 if (rc == VINF_SUCCESS)
9165 {
9166 *pu8Dst = *pu8Src;
9167 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9168 }
9169 return rc;
9170}
9171
9172
9173#ifdef IEM_WITH_SETJMP
9174/**
9175 * Fetches a data byte, longjmp on error.
9176 *
9177 * @returns The byte.
9178 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9179 * @param iSegReg The index of the segment register to use for
9180 * this access. The base and limits are checked.
9181 * @param GCPtrMem The address of the guest memory.
9182 */
9183DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9184{
9185 /* The lazy approach for now... */
9186 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9187 uint8_t const bRet = *pu8Src;
9188 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9189 return bRet;
9190}
9191#endif /* IEM_WITH_SETJMP */
9192
9193
9194/**
9195 * Fetches a data word.
9196 *
9197 * @returns Strict VBox status code.
9198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9199 * @param pu16Dst Where to return the word.
9200 * @param iSegReg The index of the segment register to use for
9201 * this access. The base and limits are checked.
9202 * @param GCPtrMem The address of the guest memory.
9203 */
9204IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9205{
9206 /* The lazy approach for now... */
9207 uint16_t const *pu16Src;
9208 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9209 if (rc == VINF_SUCCESS)
9210 {
9211 *pu16Dst = *pu16Src;
9212 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9213 }
9214 return rc;
9215}
9216
9217
9218#ifdef IEM_WITH_SETJMP
9219/**
9220 * Fetches a data word, longjmp on error.
9221 *
9222 * @returns The word
9223 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9224 * @param iSegReg The index of the segment register to use for
9225 * this access. The base and limits are checked.
9226 * @param GCPtrMem The address of the guest memory.
9227 */
9228DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9229{
9230 /* The lazy approach for now... */
9231 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9232 uint16_t const u16Ret = *pu16Src;
9233 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9234 return u16Ret;
9235}
9236#endif
9237
9238
9239/**
9240 * Fetches a data dword.
9241 *
9242 * @returns Strict VBox status code.
9243 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9244 * @param pu32Dst Where to return the dword.
9245 * @param iSegReg The index of the segment register to use for
9246 * this access. The base and limits are checked.
9247 * @param GCPtrMem The address of the guest memory.
9248 */
9249IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9250{
9251 /* The lazy approach for now... */
9252 uint32_t const *pu32Src;
9253 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9254 if (rc == VINF_SUCCESS)
9255 {
9256 *pu32Dst = *pu32Src;
9257 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9258 }
9259 return rc;
9260}
9261
9262
9263#ifdef IEM_WITH_SETJMP
9264
9265IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9266{
9267 Assert(cbMem >= 1);
9268 Assert(iSegReg < X86_SREG_COUNT);
9269
9270 /*
9271 * 64-bit mode is simpler.
9272 */
9273 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9274 {
9275 if (iSegReg >= X86_SREG_FS)
9276 {
9277 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9278 GCPtrMem += pSel->u64Base;
9279 }
9280
9281 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9282 return GCPtrMem;
9283 }
9284 /*
9285 * 16-bit and 32-bit segmentation.
9286 */
9287 else
9288 {
9289 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9290 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9291 == X86DESCATTR_P /* data, expand up */
9292 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9293 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9294 {
9295 /* expand up */
9296 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9297 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9298 && GCPtrLast32 > (uint32_t)GCPtrMem))
9299 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9300 }
9301 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9302 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9303 {
9304 /* expand down */
9305 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9306 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9307 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9308 && GCPtrLast32 > (uint32_t)GCPtrMem))
9309 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9310 }
9311 else
9312 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9313 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9314 }
9315 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9316}
9317
9318
9319IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9320{
9321 Assert(cbMem >= 1);
9322 Assert(iSegReg < X86_SREG_COUNT);
9323
9324 /*
9325 * 64-bit mode is simpler.
9326 */
9327 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9328 {
9329 if (iSegReg >= X86_SREG_FS)
9330 {
9331 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9332 GCPtrMem += pSel->u64Base;
9333 }
9334
9335 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9336 return GCPtrMem;
9337 }
9338 /*
9339 * 16-bit and 32-bit segmentation.
9340 */
9341 else
9342 {
9343 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9344 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9345 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9346 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9347 {
9348 /* expand up */
9349 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9350 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9351 && GCPtrLast32 > (uint32_t)GCPtrMem))
9352 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9353 }
9354         else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9355 {
9356 /* expand down */
9357 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9358 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9359 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9360 && GCPtrLast32 > (uint32_t)GCPtrMem))
9361 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9362 }
9363 else
9364 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9365 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9366 }
9367 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9368}
9369
9370
9371/**
9372 * Fetches a data dword, longjmp on error, fallback/safe version.
9373 *
9374 * @returns The dword
9375 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9376 * @param iSegReg The index of the segment register to use for
9377 * this access. The base and limits are checked.
9378 * @param GCPtrMem The address of the guest memory.
9379 */
9380IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9381{
9382 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9383 uint32_t const u32Ret = *pu32Src;
9384 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9385 return u32Ret;
9386}
9387
9388
9389/**
9390 * Fetches a data dword, longjmp on error.
9391 *
9392 * @returns The dword
9393 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9394 * @param iSegReg The index of the segment register to use for
9395 * this access. The base and limits are checked.
9396 * @param GCPtrMem The address of the guest memory.
9397 */
9398DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9399{
9400# ifdef IEM_WITH_DATA_TLB
9401 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9402 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9403 {
9404 /// @todo more later.
9405 }
9406
9407 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9408# else
9409 /* The lazy approach. */
9410 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9411 uint32_t const u32Ret = *pu32Src;
9412 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9413 return u32Ret;
9414# endif
9415}
9416#endif
9417
9418
9419#ifdef SOME_UNUSED_FUNCTION
9420/**
9421 * Fetches a data dword and sign extends it to a qword.
9422 *
9423 * @returns Strict VBox status code.
9424 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9425 * @param pu64Dst Where to return the sign extended value.
9426 * @param iSegReg The index of the segment register to use for
9427 * this access. The base and limits are checked.
9428 * @param GCPtrMem The address of the guest memory.
9429 */
9430IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9431{
9432 /* The lazy approach for now... */
9433 int32_t const *pi32Src;
9434 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9435 if (rc == VINF_SUCCESS)
9436 {
9437 *pu64Dst = *pi32Src;
9438 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9439 }
9440#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9441 else
9442 *pu64Dst = 0;
9443#endif
9444 return rc;
9445}
9446#endif
9447
9448
9449/**
9450 * Fetches a data qword.
9451 *
9452 * @returns Strict VBox status code.
9453 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9454 * @param pu64Dst Where to return the qword.
9455 * @param iSegReg The index of the segment register to use for
9456 * this access. The base and limits are checked.
9457 * @param GCPtrMem The address of the guest memory.
9458 */
9459IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9460{
9461 /* The lazy approach for now... */
9462 uint64_t const *pu64Src;
9463 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9464 if (rc == VINF_SUCCESS)
9465 {
9466 *pu64Dst = *pu64Src;
9467 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9468 }
9469 return rc;
9470}
9471
9472
9473#ifdef IEM_WITH_SETJMP
9474/**
9475 * Fetches a data qword, longjmp on error.
9476 *
9477 * @returns The qword.
9478 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9479 * @param iSegReg The index of the segment register to use for
9480 * this access. The base and limits are checked.
9481 * @param GCPtrMem The address of the guest memory.
9482 */
9483DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9484{
9485 /* The lazy approach for now... */
9486 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9487 uint64_t const u64Ret = *pu64Src;
9488 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9489 return u64Ret;
9490}
9491#endif
9492
9493
9494/**
9495 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9496 *
9497 * @returns Strict VBox status code.
9498 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9499 * @param pu64Dst Where to return the qword.
9500 * @param iSegReg The index of the segment register to use for
9501 * this access. The base and limits are checked.
9502 * @param GCPtrMem The address of the guest memory.
9503 */
9504IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9505{
9506 /* The lazy approach for now... */
9507 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9508 if (RT_UNLIKELY(GCPtrMem & 15))
9509 return iemRaiseGeneralProtectionFault0(pVCpu);
9510
9511 uint64_t const *pu64Src;
9512 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9513 if (rc == VINF_SUCCESS)
9514 {
9515 *pu64Dst = *pu64Src;
9516 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9517 }
9518 return rc;
9519}
9520
9521
9522#ifdef IEM_WITH_SETJMP
9523/**
9524 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9525 *
9526 * @returns The qword.
9527 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9528 * @param iSegReg The index of the segment register to use for
9529 * this access. The base and limits are checked.
9530 * @param GCPtrMem The address of the guest memory.
9531 */
9532DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9533{
9534 /* The lazy approach for now... */
9535 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9536 if (RT_LIKELY(!(GCPtrMem & 15)))
9537 {
9538 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9539 uint64_t const u64Ret = *pu64Src;
9540 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9541 return u64Ret;
9542 }
9543
9544 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9545 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9546}
9547#endif
9548
9549
9550/**
9551 * Fetches a data tword.
9552 *
9553 * @returns Strict VBox status code.
9554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9555 * @param pr80Dst Where to return the tword.
9556 * @param iSegReg The index of the segment register to use for
9557 * this access. The base and limits are checked.
9558 * @param GCPtrMem The address of the guest memory.
9559 */
9560IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9561{
9562 /* The lazy approach for now... */
9563 PCRTFLOAT80U pr80Src;
9564 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9565 if (rc == VINF_SUCCESS)
9566 {
9567 *pr80Dst = *pr80Src;
9568 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9569 }
9570 return rc;
9571}
9572
9573
9574#ifdef IEM_WITH_SETJMP
9575/**
9576 * Fetches a data tword, longjmp on error.
9577 *
9578 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9579 * @param pr80Dst Where to return the tword.
9580 * @param iSegReg The index of the segment register to use for
9581 * this access. The base and limits are checked.
9582 * @param GCPtrMem The address of the guest memory.
9583 */
9584DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9585{
9586 /* The lazy approach for now... */
9587 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9588 *pr80Dst = *pr80Src;
9589 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9590}
9591#endif
9592
9593
9594/**
9595 * Fetches a data dqword (double qword), generally SSE related.
9596 *
9597 * @returns Strict VBox status code.
9598 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9599 * @param pu128Dst Where to return the dqword.
9600 * @param iSegReg The index of the segment register to use for
9601 * this access. The base and limits are checked.
9602 * @param GCPtrMem The address of the guest memory.
9603 */
9604IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9605{
9606 /* The lazy approach for now... */
9607 PCRTUINT128U pu128Src;
9608 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9609 if (rc == VINF_SUCCESS)
9610 {
9611 pu128Dst->au64[0] = pu128Src->au64[0];
9612 pu128Dst->au64[1] = pu128Src->au64[1];
9613 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9614 }
9615 return rc;
9616}
9617
9618
9619#ifdef IEM_WITH_SETJMP
9620/**
9621 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9622 *
9623 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9624 * @param pu128Dst Where to return the dqword.
9625 * @param iSegReg The index of the segment register to use for
9626 * this access. The base and limits are checked.
9627 * @param GCPtrMem The address of the guest memory.
9628 */
9629IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9630{
9631 /* The lazy approach for now... */
9632 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9633 pu128Dst->au64[0] = pu128Src->au64[0];
9634 pu128Dst->au64[1] = pu128Src->au64[1];
9635 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9636}
9637#endif
9638
9639
9640/**
9641 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9642 * related.
9643 *
9644 * Raises \#GP(0) if not aligned.
9645 *
9646 * @returns Strict VBox status code.
9647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9648 * @param pu128Dst Where to return the dqword.
9649 * @param iSegReg The index of the segment register to use for
9650 * this access. The base and limits are checked.
9651 * @param GCPtrMem The address of the guest memory.
9652 */
9653IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9654{
9655 /* The lazy approach for now... */
9656 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9657 if ( (GCPtrMem & 15)
9658 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9659 return iemRaiseGeneralProtectionFault0(pVCpu);
9660
9661 PCRTUINT128U pu128Src;
9662 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9663 if (rc == VINF_SUCCESS)
9664 {
9665 pu128Dst->au64[0] = pu128Src->au64[0];
9666 pu128Dst->au64[1] = pu128Src->au64[1];
9667 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9668 }
9669 return rc;
9670}
9671
9672
9673#ifdef IEM_WITH_SETJMP
9674/**
9675 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9676 * related, longjmp on error.
9677 *
9678 * Raises \#GP(0) if not aligned.
9679 *
9680 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9681 * @param pu128Dst Where to return the dqword.
9682 * @param iSegReg The index of the segment register to use for
9683 * this access. The base and limits are checked.
9684 * @param GCPtrMem The address of the guest memory.
9685 */
9686DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9687{
9688 /* The lazy approach for now... */
9689 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9690 if ( (GCPtrMem & 15) == 0
9691 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9692 {
9693 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9694 pu128Dst->au64[0] = pu128Src->au64[0];
9695 pu128Dst->au64[1] = pu128Src->au64[1];
9696 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9697 return;
9698 }
9699
9700 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9701 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9702}
9703#endif
9704
9705
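/*
 * Illustrative sketch (not part of IEM, hypothetical helper name): the
 * alignment rule the two SSE fetchers above apply.  A 16-byte SSE data
 * access must be 16-byte aligned; a misaligned access raises #GP(0) unless
 * the AMD-only MXCSR.MM bit permits misaligned SSE accesses.
 */
static bool iemSketchSseAlignmentFaults(RTGCPTR GCPtrMem, uint32_t fMxCsr)
{
    bool const fMisaligned   = (GCPtrMem & 15) != 0;          /* not on a 16-byte boundary */
    bool const fMisalignedOk = (fMxCsr & X86_MXCSR_MM) != 0;  /* AMD misaligned SSE mode */
    return fMisaligned && !fMisalignedOk;                     /* true -> raise #GP(0) */
}

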
9706/**
9707 * Fetches a data oword (octo word), generally AVX related.
9708 *
9709 * @returns Strict VBox status code.
9710 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9711 * @param pu256Dst Where to return the oword.
9712 * @param iSegReg The index of the segment register to use for
9713 * this access. The base and limits are checked.
9714 * @param GCPtrMem The address of the guest memory.
9715 */
9716IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9717{
9718 /* The lazy approach for now... */
9719 PCRTUINT256U pu256Src;
9720 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9721 if (rc == VINF_SUCCESS)
9722 {
9723 pu256Dst->au64[0] = pu256Src->au64[0];
9724 pu256Dst->au64[1] = pu256Src->au64[1];
9725 pu256Dst->au64[2] = pu256Src->au64[2];
9726 pu256Dst->au64[3] = pu256Src->au64[3];
9727 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9728 }
9729 return rc;
9730}
9731
9732
9733#ifdef IEM_WITH_SETJMP
9734/**
9735 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9736 *
9737 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9738 * @param pu256Dst Where to return the oword.
9739 * @param iSegReg The index of the segment register to use for
9740 * this access. The base and limits are checked.
9741 * @param GCPtrMem The address of the guest memory.
9742 */
9743IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9744{
9745 /* The lazy approach for now... */
9746 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9747 pu256Dst->au64[0] = pu256Src->au64[0];
9748 pu256Dst->au64[1] = pu256Src->au64[1];
9749 pu256Dst->au64[2] = pu256Src->au64[2];
9750 pu256Dst->au64[3] = pu256Src->au64[3];
9751 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9752}
9753#endif
9754
9755
9756/**
9757 * Fetches a data oword (octo word) at an aligned address, generally AVX
9758 * related.
9759 *
9760 * Raises \#GP(0) if not aligned.
9761 *
9762 * @returns Strict VBox status code.
9763 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9764 * @param pu256Dst Where to return the oword.
9765 * @param iSegReg The index of the segment register to use for
9766 * this access. The base and limits are checked.
9767 * @param GCPtrMem The address of the guest memory.
9768 */
9769IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9770{
9771 /* The lazy approach for now... */
9772 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9773 if (GCPtrMem & 31)
9774 return iemRaiseGeneralProtectionFault0(pVCpu);
9775
9776 PCRTUINT256U pu256Src;
9777 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9778 if (rc == VINF_SUCCESS)
9779 {
9780 pu256Dst->au64[0] = pu256Src->au64[0];
9781 pu256Dst->au64[1] = pu256Src->au64[1];
9782 pu256Dst->au64[2] = pu256Src->au64[2];
9783 pu256Dst->au64[3] = pu256Src->au64[3];
9784 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9785 }
9786 return rc;
9787}
9788
9789
9790#ifdef IEM_WITH_SETJMP
9791/**
9792 * Fetches a data oword (octo word) at an aligned address, generally AVX
9793 * related, longjmp on error.
9794 *
9795 * Raises \#GP(0) if not aligned.
9796 *
9797 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9798 * @param pu256Dst Where to return the oword.
9799 * @param iSegReg The index of the segment register to use for
9800 * this access. The base and limits are checked.
9801 * @param GCPtrMem The address of the guest memory.
9802 */
9803DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9804{
9805 /* The lazy approach for now... */
9806 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9807 if ((GCPtrMem & 31) == 0)
9808 {
9809 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9810 pu256Dst->au64[0] = pu256Src->au64[0];
9811 pu256Dst->au64[1] = pu256Src->au64[1];
9812 pu256Dst->au64[2] = pu256Src->au64[2];
9813 pu256Dst->au64[3] = pu256Src->au64[3];
9814 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9815 return;
9816 }
9817
9818 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9819 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9820}
9821#endif
9822
9823
9824
9825/**
9826 * Fetches a descriptor register (lgdt, lidt).
9827 *
9828 * @returns Strict VBox status code.
9829 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9830 * @param pcbLimit Where to return the limit.
9831 * @param pGCPtrBase Where to return the base.
9832 * @param iSegReg The index of the segment register to use for
9833 * this access. The base and limits are checked.
9834 * @param GCPtrMem The address of the guest memory.
9835 * @param enmOpSize The effective operand size.
9836 */
9837IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9838 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9839{
9840 /*
9841 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9842 * little special:
9843 * - The two reads are done separately.
9844 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9845 * - We suspect the 386 to actually commit the limit before the base in
9846 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9847 * don't try to emulate this eccentric behavior, because it's not well
9848 * enough understood and rather hard to trigger.
9849 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9850 */
9851 VBOXSTRICTRC rcStrict;
9852 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9853 {
9854 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9855 if (rcStrict == VINF_SUCCESS)
9856 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9857 }
9858 else
9859 {
9860 uint32_t uTmp = 0; /* (Keeps Visual C++ from warning about a maybe-uninitialized use.) */
9861 if (enmOpSize == IEMMODE_32BIT)
9862 {
9863 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9864 {
9865 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9866 if (rcStrict == VINF_SUCCESS)
9867 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9868 }
9869 else
9870 {
9871 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9872 if (rcStrict == VINF_SUCCESS)
9873 {
9874 *pcbLimit = (uint16_t)uTmp;
9875 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9876 }
9877 }
9878 if (rcStrict == VINF_SUCCESS)
9879 *pGCPtrBase = uTmp;
9880 }
9881 else
9882 {
9883 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9884 if (rcStrict == VINF_SUCCESS)
9885 {
9886 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9887 if (rcStrict == VINF_SUCCESS)
9888 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9889 }
9890 }
9891 }
9892 return rcStrict;
9893}
9894
9895
9896
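/*
 * Illustrative sketch (not part of IEM, hypothetical helper and type names):
 * the pseudo-descriptor image that iemMemFetchDataXdtr reads above is simply
 * a 16-bit limit followed by the base address.  The standalone decoder below
 * makes the layout and the 24-bit base mask for a 16-bit operand size
 * explicit; it assumes a little-endian host and uses memcpy to avoid
 * alignment issues.
 */
typedef struct IEMSKETCHXDTR
{
    uint16_t cbLimit;
    uint64_t uBase;
} IEMSKETCHXDTR;

static IEMSKETCHXDTR iemSketchDecodeXdtr(uint8_t const *pbImage, bool f64Bit, bool fOpSize32)
{
    IEMSKETCHXDTR Result;
    memcpy(&Result.cbLimit, pbImage, sizeof(uint16_t));            /* bytes 0..1: the limit */
    if (f64Bit)
        memcpy(&Result.uBase, pbImage + 2, sizeof(uint64_t));      /* bytes 2..9: 64-bit base */
    else
    {
        uint32_t uBase32;
        memcpy(&uBase32, pbImage + 2, sizeof(uint32_t));           /* bytes 2..5: 32-bit base */
        if (!fOpSize32)
            uBase32 &= UINT32_C(0x00ffffff);                       /* 16-bit opsize: only 24 base bits */
        Result.uBase = uBase32;
    }
    return Result;
}

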
9897/**
9898 * Stores a data byte.
9899 *
9900 * @returns Strict VBox status code.
9901 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9902 * @param iSegReg The index of the segment register to use for
9903 * this access. The base and limits are checked.
9904 * @param GCPtrMem The address of the guest memory.
9905 * @param u8Value The value to store.
9906 */
9907IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9908{
9909 /* The lazy approach for now... */
9910 uint8_t *pu8Dst;
9911 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9912 if (rc == VINF_SUCCESS)
9913 {
9914 *pu8Dst = u8Value;
9915 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9916 }
9917 return rc;
9918}
9919
9920
9921#ifdef IEM_WITH_SETJMP
9922/**
9923 * Stores a data byte, longjmp on error.
9924 *
9925 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9926 * @param iSegReg The index of the segment register to use for
9927 * this access. The base and limits are checked.
9928 * @param GCPtrMem The address of the guest memory.
9929 * @param u8Value The value to store.
9930 */
9931IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9932{
9933 /* The lazy approach for now... */
9934 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9935 *pu8Dst = u8Value;
9936 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9937}
9938#endif
9939
9940
9941/**
9942 * Stores a data word.
9943 *
9944 * @returns Strict VBox status code.
9945 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9946 * @param iSegReg The index of the segment register to use for
9947 * this access. The base and limits are checked.
9948 * @param GCPtrMem The address of the guest memory.
9949 * @param u16Value The value to store.
9950 */
9951IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9952{
9953 /* The lazy approach for now... */
9954 uint16_t *pu16Dst;
9955 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9956 if (rc == VINF_SUCCESS)
9957 {
9958 *pu16Dst = u16Value;
9959 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9960 }
9961 return rc;
9962}
9963
9964
9965#ifdef IEM_WITH_SETJMP
9966/**
9967 * Stores a data word, longjmp on error.
9968 *
9969 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9970 * @param iSegReg The index of the segment register to use for
9971 * this access. The base and limits are checked.
9972 * @param GCPtrMem The address of the guest memory.
9973 * @param u16Value The value to store.
9974 */
9975IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9976{
9977 /* The lazy approach for now... */
9978 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9979 *pu16Dst = u16Value;
9980 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9981}
9982#endif
9983
9984
9985/**
9986 * Stores a data dword.
9987 *
9988 * @returns Strict VBox status code.
9989 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9990 * @param iSegReg The index of the segment register to use for
9991 * this access. The base and limits are checked.
9992 * @param GCPtrMem The address of the guest memory.
9993 * @param u32Value The value to store.
9994 */
9995IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9996{
9997 /* The lazy approach for now... */
9998 uint32_t *pu32Dst;
9999 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10000 if (rc == VINF_SUCCESS)
10001 {
10002 *pu32Dst = u32Value;
10003 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10004 }
10005 return rc;
10006}
10007
10008
10009#ifdef IEM_WITH_SETJMP
10010/**
10011 * Stores a data dword, longjmp on error.
10012 *
10014 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10015 * @param iSegReg The index of the segment register to use for
10016 * this access. The base and limits are checked.
10017 * @param GCPtrMem The address of the guest memory.
10018 * @param u32Value The value to store.
10019 */
10020IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10021{
10022 /* The lazy approach for now... */
10023 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10024 *pu32Dst = u32Value;
10025 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10026}
10027#endif
10028
10029
10030/**
10031 * Stores a data qword.
10032 *
10033 * @returns Strict VBox status code.
10034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10035 * @param iSegReg The index of the segment register to use for
10036 * this access. The base and limits are checked.
10037 * @param GCPtrMem The address of the guest memory.
10038 * @param u64Value The value to store.
10039 */
10040IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10041{
10042 /* The lazy approach for now... */
10043 uint64_t *pu64Dst;
10044 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10045 if (rc == VINF_SUCCESS)
10046 {
10047 *pu64Dst = u64Value;
10048 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10049 }
10050 return rc;
10051}
10052
10053
10054#ifdef IEM_WITH_SETJMP
10055/**
10056 * Stores a data qword, longjmp on error.
10057 *
10058 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10059 * @param iSegReg The index of the segment register to use for
10060 * this access. The base and limits are checked.
10061 * @param GCPtrMem The address of the guest memory.
10062 * @param u64Value The value to store.
10063 */
10064IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10065{
10066 /* The lazy approach for now... */
10067 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10068 *pu64Dst = u64Value;
10069 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10070}
10071#endif
10072
10073
10074/**
10075 * Stores a data dqword.
10076 *
10077 * @returns Strict VBox status code.
10078 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10079 * @param iSegReg The index of the segment register to use for
10080 * this access. The base and limits are checked.
10081 * @param GCPtrMem The address of the guest memory.
10082 * @param u128Value The value to store.
10083 */
10084IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10085{
10086 /* The lazy approach for now... */
10087 PRTUINT128U pu128Dst;
10088 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10089 if (rc == VINF_SUCCESS)
10090 {
10091 pu128Dst->au64[0] = u128Value.au64[0];
10092 pu128Dst->au64[1] = u128Value.au64[1];
10093 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10094 }
10095 return rc;
10096}
10097
10098
10099#ifdef IEM_WITH_SETJMP
10100/**
10101 * Stores a data dqword, longjmp on error.
10102 *
10103 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10104 * @param iSegReg The index of the segment register to use for
10105 * this access. The base and limits are checked.
10106 * @param GCPtrMem The address of the guest memory.
10107 * @param u128Value The value to store.
10108 */
10109IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10110{
10111 /* The lazy approach for now... */
10112 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10113 pu128Dst->au64[0] = u128Value.au64[0];
10114 pu128Dst->au64[1] = u128Value.au64[1];
10115 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10116}
10117#endif
10118
10119
10120/**
10121 * Stores a data dqword, SSE aligned.
10122 *
10123 * @returns Strict VBox status code.
10124 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10125 * @param iSegReg The index of the segment register to use for
10126 * this access. The base and limits are checked.
10127 * @param GCPtrMem The address of the guest memory.
10128 * @param u128Value The value to store.
10129 */
10130IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10131{
10132 /* The lazy approach for now... */
10133 if ( (GCPtrMem & 15)
10134 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10135 return iemRaiseGeneralProtectionFault0(pVCpu);
10136
10137 PRTUINT128U pu128Dst;
10138 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10139 if (rc == VINF_SUCCESS)
10140 {
10141 pu128Dst->au64[0] = u128Value.au64[0];
10142 pu128Dst->au64[1] = u128Value.au64[1];
10143 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10144 }
10145 return rc;
10146}
10147
10148
10149#ifdef IEM_WITH_SETJMP
10150/**
10151 * Stores a data dqword, SSE aligned, longjmp on error.
10152 *
10154 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10155 * @param iSegReg The index of the segment register to use for
10156 * this access. The base and limits are checked.
10157 * @param GCPtrMem The address of the guest memory.
10158 * @param u128Value The value to store.
10159 */
10160DECL_NO_INLINE(IEM_STATIC, void)
10161iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10162{
10163 /* The lazy approach for now... */
10164 if ( (GCPtrMem & 15) == 0
10165 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10166 {
10167 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10168 pu128Dst->au64[0] = u128Value.au64[0];
10169 pu128Dst->au64[1] = u128Value.au64[1];
10170 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10171 return;
10172 }
10173
10174 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10175 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10176}
10177#endif
10178
10179
10180/**
10181 * Stores a data oword (octo word), generally AVX related.
10182 *
10183 * @returns Strict VBox status code.
10184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10185 * @param iSegReg The index of the segment register to use for
10186 * this access. The base and limits are checked.
10187 * @param GCPtrMem The address of the guest memory.
10188 * @param pu256Value Pointer to the value to store.
10189 */
10190IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10191{
10192 /* The lazy approach for now... */
10193 PRTUINT256U pu256Dst;
10194 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10195 if (rc == VINF_SUCCESS)
10196 {
10197 pu256Dst->au64[0] = pu256Value->au64[0];
10198 pu256Dst->au64[1] = pu256Value->au64[1];
10199 pu256Dst->au64[2] = pu256Value->au64[2];
10200 pu256Dst->au64[3] = pu256Value->au64[3];
10201 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10202 }
10203 return rc;
10204}
10205
10206
10207#ifdef IEM_WITH_SETJMP
10208/**
10209 * Stores a data oword (octo word), longjmp on error.
10210 *
10211 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10212 * @param iSegReg The index of the segment register to use for
10213 * this access. The base and limits are checked.
10214 * @param GCPtrMem The address of the guest memory.
10215 * @param pu256Value Pointer to the value to store.
10216 */
10217IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10218{
10219 /* The lazy approach for now... */
10220 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10221 pu256Dst->au64[0] = pu256Value->au64[0];
10222 pu256Dst->au64[1] = pu256Value->au64[1];
10223 pu256Dst->au64[2] = pu256Value->au64[2];
10224 pu256Dst->au64[3] = pu256Value->au64[3];
10225 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10226}
10227#endif
10228
10229
10230/**
10231 * Stores a data oword, AVX aligned.
10232 *
10233 * @returns Strict VBox status code.
10234 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10235 * @param iSegReg The index of the segment register to use for
10236 * this access. The base and limits are checked.
10237 * @param GCPtrMem The address of the guest memory.
10238 * @param pu256Value Pointer to the value to store.
10239 */
10240IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10241{
10242 /* The lazy approach for now... */
10243 if (GCPtrMem & 31)
10244 return iemRaiseGeneralProtectionFault0(pVCpu);
10245
10246 PRTUINT256U pu256Dst;
10247 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10248 if (rc == VINF_SUCCESS)
10249 {
10250 pu256Dst->au64[0] = pu256Value->au64[0];
10251 pu256Dst->au64[1] = pu256Value->au64[1];
10252 pu256Dst->au64[2] = pu256Value->au64[2];
10253 pu256Dst->au64[3] = pu256Value->au64[3];
10254 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10255 }
10256 return rc;
10257}
10258
10259
10260#ifdef IEM_WITH_SETJMP
10261/**
10262 * Stores a data oword, AVX aligned, longjmp on error.
10263 *
10265 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10266 * @param iSegReg The index of the segment register to use for
10267 * this access. The base and limits are checked.
10268 * @param GCPtrMem The address of the guest memory.
10269 * @param pu256Value Pointer to the value to store.
10270 */
10271DECL_NO_INLINE(IEM_STATIC, void)
10272iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10273{
10274 /* The lazy approach for now... */
10275 if ((GCPtrMem & 31) == 0)
10276 {
10277 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10278 pu256Dst->au64[0] = pu256Value->au64[0];
10279 pu256Dst->au64[1] = pu256Value->au64[1];
10280 pu256Dst->au64[2] = pu256Value->au64[2];
10281 pu256Dst->au64[3] = pu256Value->au64[3];
10282 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10283 return;
10284 }
10285
10286 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10287 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10288}
10289#endif
10290
10291
10292/**
10293 * Stores a descriptor register (sgdt, sidt).
10294 *
10295 * @returns Strict VBox status code.
10296 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10297 * @param cbLimit The limit.
10298 * @param GCPtrBase The base address.
10299 * @param iSegReg The index of the segment register to use for
10300 * this access. The base and limits are checked.
10301 * @param GCPtrMem The address of the guest memory.
10302 */
10303IEM_STATIC VBOXSTRICTRC
10304iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10305{
10306 /*
10307 * The SIDT and SGDT instructions actually stores the data using two
10308 * independent writes. The instructions does not respond to opsize prefixes.
10309 */
10310 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10311 if (rcStrict == VINF_SUCCESS)
10312 {
10313 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10314 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10315 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10316 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10317 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10318 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10319 else
10320 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10321 }
10322 return rcStrict;
10323}
10324
10325
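/*
 * Illustrative sketch (not part of IEM, hypothetical helper name): the
 * 6-byte image produced by iemMemStoreDataXdtr above in 16-bit and 32-bit
 * code - a limit word followed by a 32-bit base, with the pre-386 quirk of
 * filling the top base byte with 0xFF when the operand size is 16-bit.
 * Standalone model; little-endian host assumed.
 */
static void iemSketchBuildXdtrImage(uint8_t pbImage[6], uint16_t cbLimit, uint32_t uBase, bool fOpSize16, bool fCpu286OrOlder)
{
    memcpy(pbImage, &cbLimit, sizeof(cbLimit));        /* bytes 0..1: the limit, always written */
    if (fOpSize16 && fCpu286OrOlder)
        uBase |= UINT32_C(0xff000000);                 /* 286: the undefined top byte is stored as 0xFF */
    memcpy(pbImage + 2, &uBase, sizeof(uBase));        /* bytes 2..5: the base */
}

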
10326/**
10327 * Pushes a word onto the stack.
10328 *
10329 * @returns Strict VBox status code.
10330 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10331 * @param u16Value The value to push.
10332 */
10333IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10334{
10335 /* Decrement the stack pointer. */
10336 uint64_t uNewRsp;
10337 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10338 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
10339
10340 /* Write the word the lazy way. */
10341 uint16_t *pu16Dst;
10342 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10343 if (rc == VINF_SUCCESS)
10344 {
10345 *pu16Dst = u16Value;
10346 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10347 }
10348
10349 /* Commit the new RSP value unless an access handler made trouble. */
10350 if (rc == VINF_SUCCESS)
10351 pCtx->rsp = uNewRsp;
10352
10353 return rc;
10354}
10355
10356
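/*
 * Illustrative sketch (not part of IEM, hypothetical helper names): the
 * stack pointer arithmetic behind the push and pop helpers in this file,
 * reduced to a flat 64-bit stack.  A push writes at RSP - cb and commits
 * the lower value afterwards; a pop reads at the current RSP and commits
 * RSP + cb.  The real iemRegGetRspForPush/Pop also handle 16-bit and
 * 32-bit stack segments.
 */
static uint64_t iemSketchRspForPush(uint64_t uRsp, uint8_t cbItem, uint64_t *puNewRsp)
{
    *puNewRsp = uRsp - cbItem;  /* RSP value to commit once the write succeeded */
    return uRsp - cbItem;       /* address the pushed item is written to */
}

static uint64_t iemSketchRspForPop(uint64_t uRsp, uint8_t cbItem, uint64_t *puNewRsp)
{
    *puNewRsp = uRsp + cbItem;  /* RSP value to commit once the read succeeded */
    return uRsp;                /* address the popped item is read from */
}

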
10357/**
10358 * Pushes a dword onto the stack.
10359 *
10360 * @returns Strict VBox status code.
10361 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10362 * @param u32Value The value to push.
10363 */
10364IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10365{
10366 /* Decrement the stack pointer. */
10367 uint64_t uNewRsp;
10368 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10369 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10370
10371 /* Write the dword the lazy way. */
10372 uint32_t *pu32Dst;
10373 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10374 if (rc == VINF_SUCCESS)
10375 {
10376 *pu32Dst = u32Value;
10377 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10378 }
10379
10380 /* Commit the new RSP value unless an access handler made trouble. */
10381 if (rc == VINF_SUCCESS)
10382 pCtx->rsp = uNewRsp;
10383
10384 return rc;
10385}
10386
10387
10388/**
10389 * Pushes a dword segment register value onto the stack.
10390 *
10391 * @returns Strict VBox status code.
10392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10393 * @param u32Value The value to push.
10394 */
10395IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10396{
10397 /* Decrement the stack pointer. */
10398 uint64_t uNewRsp;
10399 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10400 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10401
10402 VBOXSTRICTRC rc;
10403 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
10404 {
10405 /* The recompiler writes a full dword. */
10406 uint32_t *pu32Dst;
10407 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10408 if (rc == VINF_SUCCESS)
10409 {
10410 *pu32Dst = u32Value;
10411 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10412 }
10413 }
10414 else
10415 {
10416 /* The intel docs talk about zero extending the selector register
10417 value. My actual intel CPU here might be zero extending the value
10418 but it still only writes the lower word... */
10419 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10420 * happens when crossing an electric page boundary, is the high word checked
10421 * for write accessibility or not? Probably it is. What about segment limits?
10422 * It appears this behavior is also shared with trap error codes.
10423 *
10424 * Docs indicate the behavior may have changed with the Pentium or Pentium Pro. Check
10425 * ancient hardware to see when it actually changed. */
10426 uint16_t *pu16Dst;
10427 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10428 if (rc == VINF_SUCCESS)
10429 {
10430 *pu16Dst = (uint16_t)u32Value;
10431 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10432 }
10433 }
10434
10435 /* Commit the new RSP value unless an access handler made trouble. */
10436 if (rc == VINF_SUCCESS)
10437 pCtx->rsp = uNewRsp;
10438
10439 return rc;
10440}
10441
10442
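/*
 * Illustrative sketch (not part of IEM, hypothetical helper name): the
 * behaviour described in the comment above - pushing a segment register
 * with a 32-bit operand size reserves a dword stack slot, but only the low
 * word is written, so the upper half of the slot keeps its previous
 * contents.  Standalone model using a plain byte array as the stack.
 */
static uint32_t iemSketchPushSRegLowWordOnly(uint8_t *pbStack, uint32_t offRsp, uint16_t uSel)
{
    uint32_t const offNewRsp = offRsp - sizeof(uint32_t);   /* a full dword slot is reserved... */
    memcpy(&pbStack[offNewRsp], &uSel, sizeof(uSel));       /* ...but only the low word is written */
    return offNewRsp;                                       /* new (lower) stack offset */
}

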
10443/**
10444 * Pushes a qword onto the stack.
10445 *
10446 * @returns Strict VBox status code.
10447 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10448 * @param u64Value The value to push.
10449 */
10450IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10451{
10452 /* Decrement the stack pointer. */
10453 uint64_t uNewRsp;
10454 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10455 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
10456
10457 /* Write the qword the lazy way. */
10458 uint64_t *pu64Dst;
10459 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10460 if (rc == VINF_SUCCESS)
10461 {
10462 *pu64Dst = u64Value;
10463 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10464 }
10465
10466 /* Commit the new RSP value unless an access handler made trouble. */
10467 if (rc == VINF_SUCCESS)
10468 pCtx->rsp = uNewRsp;
10469
10470 return rc;
10471}
10472
10473
10474/**
10475 * Pops a word from the stack.
10476 *
10477 * @returns Strict VBox status code.
10478 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10479 * @param pu16Value Where to store the popped value.
10480 */
10481IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10482{
10483 /* Increment the stack pointer. */
10484 uint64_t uNewRsp;
10485 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10486 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
10487
10488 /* Read the word the lazy way. */
10489 uint16_t const *pu16Src;
10490 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10491 if (rc == VINF_SUCCESS)
10492 {
10493 *pu16Value = *pu16Src;
10494 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10495
10496 /* Commit the new RSP value. */
10497 if (rc == VINF_SUCCESS)
10498 pCtx->rsp = uNewRsp;
10499 }
10500
10501 return rc;
10502}
10503
10504
10505/**
10506 * Pops a dword from the stack.
10507 *
10508 * @returns Strict VBox status code.
10509 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10510 * @param pu32Value Where to store the popped value.
10511 */
10512IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10513{
10514 /* Increment the stack pointer. */
10515 uint64_t uNewRsp;
10516 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10517 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
10518
10519 /* Read the dword the lazy way. */
10520 uint32_t const *pu32Src;
10521 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10522 if (rc == VINF_SUCCESS)
10523 {
10524 *pu32Value = *pu32Src;
10525 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10526
10527 /* Commit the new RSP value. */
10528 if (rc == VINF_SUCCESS)
10529 pCtx->rsp = uNewRsp;
10530 }
10531
10532 return rc;
10533}
10534
10535
10536/**
10537 * Pops a qword from the stack.
10538 *
10539 * @returns Strict VBox status code.
10540 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10541 * @param pu64Value Where to store the popped value.
10542 */
10543IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10544{
10545 /* Increment the stack pointer. */
10546 uint64_t uNewRsp;
10547 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10548 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
10549
10550 /* Read the qword the lazy way. */
10551 uint64_t const *pu64Src;
10552 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10553 if (rc == VINF_SUCCESS)
10554 {
10555 *pu64Value = *pu64Src;
10556 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10557
10558 /* Commit the new RSP value. */
10559 if (rc == VINF_SUCCESS)
10560 pCtx->rsp = uNewRsp;
10561 }
10562
10563 return rc;
10564}
10565
10566
10567/**
10568 * Pushes a word onto the stack, using a temporary stack pointer.
10569 *
10570 * @returns Strict VBox status code.
10571 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10572 * @param u16Value The value to push.
10573 * @param pTmpRsp Pointer to the temporary stack pointer.
10574 */
10575IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10576{
10577 /* Decrement the stack pointer. */
10578 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10579 RTUINT64U NewRsp = *pTmpRsp;
10580 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
10581
10582 /* Write the word the lazy way. */
10583 uint16_t *pu16Dst;
10584 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10585 if (rc == VINF_SUCCESS)
10586 {
10587 *pu16Dst = u16Value;
10588 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10589 }
10590
10591 /* Commit the new RSP value unless an access handler made trouble. */
10592 if (rc == VINF_SUCCESS)
10593 *pTmpRsp = NewRsp;
10594
10595 return rc;
10596}
10597
10598
10599/**
10600 * Pushes a dword onto the stack, using a temporary stack pointer.
10601 *
10602 * @returns Strict VBox status code.
10603 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10604 * @param u32Value The value to push.
10605 * @param pTmpRsp Pointer to the temporary stack pointer.
10606 */
10607IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10608{
10609 /* Decrement the stack pointer. */
10610 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10611 RTUINT64U NewRsp = *pTmpRsp;
10612 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
10613
10614 /* Write the dword the lazy way. */
10615 uint32_t *pu32Dst;
10616 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10617 if (rc == VINF_SUCCESS)
10618 {
10619 *pu32Dst = u32Value;
10620 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10621 }
10622
10623 /* Commit the new RSP value unless an access handler made trouble. */
10624 if (rc == VINF_SUCCESS)
10625 *pTmpRsp = NewRsp;
10626
10627 return rc;
10628}
10629
10630
10631/**
10632 * Pushes a qword onto the stack, using a temporary stack pointer.
10633 *
10634 * @returns Strict VBox status code.
10635 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10636 * @param u64Value The value to push.
10637 * @param pTmpRsp Pointer to the temporary stack pointer.
10638 */
10639IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10640{
10641 /* Decrement the stack pointer. */
10642 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10643 RTUINT64U NewRsp = *pTmpRsp;
10644 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
10645
10646 /* Write the qword the lazy way. */
10647 uint64_t *pu64Dst;
10648 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10649 if (rc == VINF_SUCCESS)
10650 {
10651 *pu64Dst = u64Value;
10652 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10653 }
10654
10655 /* Commit the new RSP value unless an access handler made trouble. */
10656 if (rc == VINF_SUCCESS)
10657 *pTmpRsp = NewRsp;
10658
10659 return rc;
10660}
10661
10662
10663/**
10664 * Pops a word from the stack, using a temporary stack pointer.
10665 *
10666 * @returns Strict VBox status code.
10667 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10668 * @param pu16Value Where to store the popped value.
10669 * @param pTmpRsp Pointer to the temporary stack pointer.
10670 */
10671IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10672{
10673 /* Increment the stack pointer. */
10674 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10675 RTUINT64U NewRsp = *pTmpRsp;
10676 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
10677
10678 /* Read the word the lazy way. */
10679 uint16_t const *pu16Src;
10680 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10681 if (rc == VINF_SUCCESS)
10682 {
10683 *pu16Value = *pu16Src;
10684 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10685
10686 /* Commit the new RSP value. */
10687 if (rc == VINF_SUCCESS)
10688 *pTmpRsp = NewRsp;
10689 }
10690
10691 return rc;
10692}
10693
10694
10695/**
10696 * Pops a dword from the stack, using a temporary stack pointer.
10697 *
10698 * @returns Strict VBox status code.
10699 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10700 * @param pu32Value Where to store the popped value.
10701 * @param pTmpRsp Pointer to the temporary stack pointer.
10702 */
10703IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10704{
10705 /* Increment the stack pointer. */
10706 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10707 RTUINT64U NewRsp = *pTmpRsp;
10708 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
10709
10710 /* Read the dword the lazy way. */
10711 uint32_t const *pu32Src;
10712 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10713 if (rc == VINF_SUCCESS)
10714 {
10715 *pu32Value = *pu32Src;
10716 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10717
10718 /* Commit the new RSP value. */
10719 if (rc == VINF_SUCCESS)
10720 *pTmpRsp = NewRsp;
10721 }
10722
10723 return rc;
10724}
10725
10726
10727/**
10728 * Pops a qword from the stack, using a temporary stack pointer.
10729 *
10730 * @returns Strict VBox status code.
10731 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10732 * @param pu64Value Where to store the popped value.
10733 * @param pTmpRsp Pointer to the temporary stack pointer.
10734 */
10735IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10736{
10737 /* Increment the stack pointer. */
10738 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10739 RTUINT64U NewRsp = *pTmpRsp;
10740 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10741
10742 /* Read the qword the lazy way. */
10743 uint64_t const *pu64Src;
10744 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10745 if (rcStrict == VINF_SUCCESS)
10746 {
10747 *pu64Value = *pu64Src;
10748 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10749
10750 /* Commit the new RSP value. */
10751 if (rcStrict == VINF_SUCCESS)
10752 *pTmpRsp = NewRsp;
10753 }
10754
10755 return rcStrict;
10756}
10757
10758
10759/**
10760 * Begin a special stack push (used by interrupts, exceptions and such).
10761 *
10762 * This will raise \#SS or \#PF if appropriate.
10763 *
10764 * @returns Strict VBox status code.
10765 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10766 * @param cbMem The number of bytes to push onto the stack.
10767 * @param ppvMem Where to return the pointer to the stack memory.
10768 * As with the other memory functions this could be
10769 * direct access or bounce buffered access, so
10770 * don't commit the register until the commit call
10771 * succeeds.
10772 * @param puNewRsp Where to return the new RSP value. This must be
10773 * passed unchanged to
10774 * iemMemStackPushCommitSpecial().
10775 */
10776IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10777{
10778 Assert(cbMem < UINT8_MAX);
10779 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10780 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10781 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10782}
10783
10784
10785/**
10786 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10787 *
10788 * This will update the rSP.
10789 *
10790 * @returns Strict VBox status code.
10791 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10792 * @param pvMem The pointer returned by
10793 * iemMemStackPushBeginSpecial().
10794 * @param uNewRsp The new RSP value returned by
10795 * iemMemStackPushBeginSpecial().
10796 */
10797IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10798{
10799 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10800 if (rcStrict == VINF_SUCCESS)
10801 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10802 return rcStrict;
10803}
10804
10805
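/*
 * Illustrative usage sketch (hypothetical function, not actual IEM code):
 * how iemMemStackPushBeginSpecial and iemMemStackPushCommitSpecial are
 * meant to be combined - map the stack bytes, fill them in, and only then
 * commit both the memory and the new RSP.  The real exception delivery
 * code pushes a proper frame and has more error handling.
 */
IEM_STATIC VBOXSTRICTRC iemSketchPushTwoWordsSpecial(PVMCPU pVCpu, uint16_t uFirst, uint16_t uSecond)
{
    uint64_t     uNewRsp;
    uint16_t    *pau16Frame;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 2 * sizeof(uint16_t), (void **)&pau16Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    pau16Frame[0] = uFirst;      /* lowest address of the reserved area */
    pau16Frame[1] = uSecond;
    return iemMemStackPushCommitSpecial(pVCpu, pau16Frame, uNewRsp); /* commits the bytes and RSP */
}

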
10806/**
10807 * Begin a special stack pop (used by iret, retf and such).
10808 *
10809 * This will raise \#SS or \#PF if appropriate.
10810 *
10811 * @returns Strict VBox status code.
10812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10813 * @param cbMem The number of bytes to pop from the stack.
10814 * @param ppvMem Where to return the pointer to the stack memory.
10815 * @param puNewRsp Where to return the new RSP value. This must be
10816 * assigned to CPUMCTX::rsp manually some time
10817 * after iemMemStackPopDoneSpecial() has been
10818 * called.
10819 */
10820IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10821{
10822 Assert(cbMem < UINT8_MAX);
10823 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10824 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10825 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10826}
10827
10828
10829/**
10830 * Continue a special stack pop (used by iret and retf).
10831 *
10832 * This will raise \#SS or \#PF if appropriate.
10833 *
10834 * @returns Strict VBox status code.
10835 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10836 * @param cbMem The number of bytes to pop from the stack.
10837 * @param ppvMem Where to return the pointer to the stack memory.
10838 * @param puNewRsp Where to return the new RSP value. This must be
10839 * assigned to CPUMCTX::rsp manually some time
10840 * after iemMemStackPopDoneSpecial() has been
10841 * called.
10842 */
10843IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10844{
10845 Assert(cbMem < UINT8_MAX);
10846 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10847 RTUINT64U NewRsp;
10848 NewRsp.u = *puNewRsp;
10849 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10850 *puNewRsp = NewRsp.u;
10851 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10852}
10853
10854
10855/**
10856 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10857 * iemMemStackPopContinueSpecial).
10858 *
10859 * The caller will manually commit the rSP.
10860 *
10861 * @returns Strict VBox status code.
10862 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10863 * @param pvMem The pointer returned by
10864 * iemMemStackPopBeginSpecial() or
10865 * iemMemStackPopContinueSpecial().
10866 */
10867IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10868{
10869 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10870}
10871
10872
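/*
 * Illustrative usage sketch (hypothetical function, not actual IEM code):
 * the matching pop-side protocol - map the stack bytes with
 * iemMemStackPopBeginSpecial, copy them out, release the mapping with
 * iemMemStackPopDoneSpecial, and only then assign the returned RSP value
 * to CPUMCTX::rsp, exactly as the comments above require.
 */
IEM_STATIC VBOXSTRICTRC iemSketchPopTwoWordsSpecial(PVMCPU pVCpu, uint16_t *puFirst, uint16_t *puSecond)
{
    uint64_t        uNewRsp;
    uint16_t const *pau16Frame;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 2 * sizeof(uint16_t), (void const **)&pau16Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *puFirst  = pau16Frame[0];
    *puSecond = pau16Frame[1];
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pau16Frame);
    if (rcStrict == VINF_SUCCESS)
        IEM_GET_CTX(pVCpu)->rsp = uNewRsp;   /* the caller commits RSP manually */
    return rcStrict;
}

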
10873/**
10874 * Fetches a system table byte.
10875 *
10876 * @returns Strict VBox status code.
10877 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10878 * @param pbDst Where to return the byte.
10879 * @param iSegReg The index of the segment register to use for
10880 * this access. The base and limits are checked.
10881 * @param GCPtrMem The address of the guest memory.
10882 */
10883IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10884{
10885 /* The lazy approach for now... */
10886 uint8_t const *pbSrc;
10887 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10888 if (rc == VINF_SUCCESS)
10889 {
10890 *pbDst = *pbSrc;
10891 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10892 }
10893 return rc;
10894}
10895
10896
10897/**
10898 * Fetches a system table word.
10899 *
10900 * @returns Strict VBox status code.
10901 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10902 * @param pu16Dst Where to return the word.
10903 * @param iSegReg The index of the segment register to use for
10904 * this access. The base and limits are checked.
10905 * @param GCPtrMem The address of the guest memory.
10906 */
10907IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10908{
10909 /* The lazy approach for now... */
10910 uint16_t const *pu16Src;
10911 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10912 if (rc == VINF_SUCCESS)
10913 {
10914 *pu16Dst = *pu16Src;
10915 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10916 }
10917 return rc;
10918}
10919
10920
10921/**
10922 * Fetches a system table dword.
10923 *
10924 * @returns Strict VBox status code.
10925 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10926 * @param pu32Dst Where to return the dword.
10927 * @param iSegReg The index of the segment register to use for
10928 * this access. The base and limits are checked.
10929 * @param GCPtrMem The address of the guest memory.
10930 */
10931IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10932{
10933 /* The lazy approach for now... */
10934 uint32_t const *pu32Src;
10935 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10936 if (rc == VINF_SUCCESS)
10937 {
10938 *pu32Dst = *pu32Src;
10939 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10940 }
10941 return rc;
10942}
10943
10944
10945/**
10946 * Fetches a system table qword.
10947 *
10948 * @returns Strict VBox status code.
10949 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10950 * @param pu64Dst Where to return the qword.
10951 * @param iSegReg The index of the segment register to use for
10952 * this access. The base and limits are checked.
10953 * @param GCPtrMem The address of the guest memory.
10954 */
10955IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10956{
10957 /* The lazy approach for now... */
10958 uint64_t const *pu64Src;
10959 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10960 if (rc == VINF_SUCCESS)
10961 {
10962 *pu64Dst = *pu64Src;
10963 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10964 }
10965 return rc;
10966}
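/*
 * Illustrative sketch only (not compiled): reading the raw low qword of a GDT
 * entry with the helper above.  UINT8_MAX is passed as the segment index just
 * like iemMemFetchSelDescWithErr() below does for these flat system-table
 * accesses; uSel stands for a hypothetical selector value.
 */
#if 0
{
    uint64_t     u64LowDesc;
    VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &u64LowDesc, UINT8_MAX,
                                              IEM_GET_CTX(pVCpu)->gdtr.pGdt + (uSel & X86_SEL_MASK));
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
}
#endif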
10967
10968
10969/**
10970 * Fetches a descriptor table entry with caller specified error code.
10971 *
10972 * @returns Strict VBox status code.
10973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10974 * @param pDesc Where to return the descriptor table entry.
10975 * @param uSel The selector which table entry to fetch.
10976 * @param uXcpt The exception to raise on table lookup error.
10977 * @param uErrorCode The error code associated with the exception.
10978 */
10979IEM_STATIC VBOXSTRICTRC
10980iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10981{
10982 AssertPtr(pDesc);
10983 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10984
10985 /** @todo did the 286 require all 8 bytes to be accessible? */
10986 /*
10987 * Get the selector table base and check bounds.
10988 */
10989 RTGCPTR GCPtrBase;
10990 if (uSel & X86_SEL_LDT)
10991 {
10992 if ( !pCtx->ldtr.Attr.n.u1Present
10993 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10994 {
10995 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10996 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10997 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10998 uErrorCode, 0);
10999 }
11000
11001 Assert(pCtx->ldtr.Attr.n.u1Present);
11002 GCPtrBase = pCtx->ldtr.u64Base;
11003 }
11004 else
11005 {
11006 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
11007 {
11008 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
11009 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
11010 uErrorCode, 0);
11011 }
11012 GCPtrBase = pCtx->gdtr.pGdt;
11013 }
11014
11015 /*
11016 * Read the legacy descriptor and maybe the long mode extensions if
11017 * required.
11018 */
11019 VBOXSTRICTRC rcStrict;
11020 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
11021 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
11022 else
11023 {
11024 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
11025 if (rcStrict == VINF_SUCCESS)
11026 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
11027 if (rcStrict == VINF_SUCCESS)
11028 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
11029 if (rcStrict == VINF_SUCCESS)
11030 pDesc->Legacy.au16[3] = 0;
11031 else
11032 return rcStrict;
11033 }
11034
11035 if (rcStrict == VINF_SUCCESS)
11036 {
11037 if ( !IEM_IS_LONG_MODE(pVCpu)
11038 || pDesc->Legacy.Gen.u1DescType)
11039 pDesc->Long.au64[1] = 0;
11040 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
11041 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
11042 else
11043 {
11044 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
11045 /** @todo is this the right exception? */
11046 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
11047 }
11048 }
11049 return rcStrict;
11050}
11051
11052
11053/**
11054 * Fetches a descriptor table entry.
11055 *
11056 * @returns Strict VBox status code.
11057 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11058 * @param pDesc Where to return the descriptor table entry.
11059 * @param uSel The selector which table entry to fetch.
11060 * @param uXcpt The exception to raise on table lookup error.
11061 */
11062IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11063{
11064 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11065}
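/*
 * Illustrative sketch only (not compiled): the typical fetch / present-check /
 * mark-accessed sequence built on the two functions above and on
 * iemMemMarkSelDescAccessed() further down.  The not-present raise helper is
 * assumed to exist elsewhere in IEM; error paths are trimmed.
 */
#if 0
{
    IEMSELDESC   Desc;
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    if (!Desc.Legacy.Gen.u1Present)
        return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);   /* assumed helper */
    if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
    {
        rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
        Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;            /* keep the local copy in sync */
    }
}
#endif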
11066
11067
11068/**
11069 * Fakes a long mode stack selector for SS = 0.
11070 *
11071 * @param pDescSs Where to return the fake stack descriptor.
11072 * @param uDpl The DPL we want.
11073 */
11074IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11075{
11076 pDescSs->Long.au64[0] = 0;
11077 pDescSs->Long.au64[1] = 0;
11078 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11079 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11080 pDescSs->Long.Gen.u2Dpl = uDpl;
11081 pDescSs->Long.Gen.u1Present = 1;
11082 pDescSs->Long.Gen.u1Long = 1;
11083}
11084
11085
11086/**
11087 * Marks the selector descriptor as accessed (only non-system descriptors).
11088 *
11089 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11090 * will therefore skip the limit checks.
11091 *
11092 * @returns Strict VBox status code.
11093 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11094 * @param uSel The selector.
11095 */
11096IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11097{
11098 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11099
11100 /*
11101 * Get the selector table base and calculate the entry address.
11102 */
11103 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11104 ? pCtx->ldtr.u64Base
11105 : pCtx->gdtr.pGdt;
11106 GCPtr += uSel & X86_SEL_MASK;
11107
11108 /*
11109 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11110 * ugly stuff to avoid this. This will make sure it's an atomic access
11111 * as well as more or less remove any question about 8-bit or 32-bit accesses.
11112 */
11113 VBOXSTRICTRC rcStrict;
11114 uint32_t volatile *pu32;
11115 if ((GCPtr & 3) == 0)
11116 {
11117 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
11118 GCPtr += 2 + 2;
11119 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11120 if (rcStrict != VINF_SUCCESS)
11121 return rcStrict;
11122 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11123 }
11124 else
11125 {
11126 /* The misaligned GDT/LDT case, map the whole thing. */
11127 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11128 if (rcStrict != VINF_SUCCESS)
11129 return rcStrict;
11130 switch ((uintptr_t)pu32 & 3)
11131 {
11132 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11133 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11134 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11135 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11136 }
11137 }
11138
11139 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11140}
11141
11142/** @} */
11143
11144
11145/*
11146 * Include the C/C++ implementation of instruction.
11147 */
11148#include "IEMAllCImpl.cpp.h"
11149
11150
11151
11152/** @name "Microcode" macros.
11153 *
11154 * The idea is that we should be able to use the same code to interpret
11155 * instructions as well as to recompile them. Thus this obfuscation.
11156 *
11157 * @{
11158 */
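/*
 * Illustrative sketch only (not compiled): roughly what a "microcode" block
 * looks like when the macros below are used by the instruction decoder
 * templates -- here a minimal register-to-register 16-bit move body.  Real
 * decoder functions derive the register indices from the ModR/M byte instead
 * of hardcoding xAX/xCX.
 */
#if 0
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(uint16_t, u16Value);
IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xCX);
IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Value);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif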
11159#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11160#define IEM_MC_END() }
11161#define IEM_MC_PAUSE() do {} while (0)
11162#define IEM_MC_CONTINUE() do {} while (0)
11163
11164/** Internal macro. */
11165#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11166 do \
11167 { \
11168 VBOXSTRICTRC rcStrict2 = a_Expr; \
11169 if (rcStrict2 != VINF_SUCCESS) \
11170 return rcStrict2; \
11171 } while (0)
11172
11173
11174#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11175#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11176#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11177#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11178#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11179#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11180#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11181#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11182#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11183 do { \
11184 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11185 return iemRaiseDeviceNotAvailable(pVCpu); \
11186 } while (0)
11187#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11188 do { \
11189 if (((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11190 return iemRaiseDeviceNotAvailable(pVCpu); \
11191 } while (0)
11192#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11193 do { \
11194 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11195 return iemRaiseMathFault(pVCpu); \
11196 } while (0)
11197#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11198 do { \
11199 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11200 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11201 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11202 return iemRaiseUndefinedOpcode(pVCpu); \
11203 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11204 return iemRaiseDeviceNotAvailable(pVCpu); \
11205 } while (0)
11206#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11207 do { \
11208 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11209 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11210 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11211 return iemRaiseUndefinedOpcode(pVCpu); \
11212 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11213 return iemRaiseDeviceNotAvailable(pVCpu); \
11214 } while (0)
11215#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11216 do { \
11217 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11218 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11219 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11220 return iemRaiseUndefinedOpcode(pVCpu); \
11221 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11222 return iemRaiseDeviceNotAvailable(pVCpu); \
11223 } while (0)
11224#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11225 do { \
11226 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11227 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11228 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11229 return iemRaiseUndefinedOpcode(pVCpu); \
11230 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11231 return iemRaiseDeviceNotAvailable(pVCpu); \
11232 } while (0)
11233#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11234 do { \
11235 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11236 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11237 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11238 return iemRaiseUndefinedOpcode(pVCpu); \
11239 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11240 return iemRaiseDeviceNotAvailable(pVCpu); \
11241 } while (0)
11242#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11243 do { \
11244 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11245 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11246 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11247 return iemRaiseUndefinedOpcode(pVCpu); \
11248 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11249 return iemRaiseDeviceNotAvailable(pVCpu); \
11250 } while (0)
11251#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11252 do { \
11253 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11254 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11255 return iemRaiseUndefinedOpcode(pVCpu); \
11256 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11257 return iemRaiseDeviceNotAvailable(pVCpu); \
11258 } while (0)
11259#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11260 do { \
11261 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11262 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11263 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11264 return iemRaiseUndefinedOpcode(pVCpu); \
11265 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11266 return iemRaiseDeviceNotAvailable(pVCpu); \
11267 } while (0)
11268#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11269 do { \
11270 if (pVCpu->iem.s.uCpl != 0) \
11271 return iemRaiseGeneralProtectionFault0(pVCpu); \
11272 } while (0)
11273#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11274 do { \
11275 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11276 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11277 } while (0)
11278#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11279 do { \
11280 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11281 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11282 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_FSGSBASE)) \
11283 return iemRaiseUndefinedOpcode(pVCpu); \
11284 } while (0)
11285#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11286 do { \
11287 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11288 return iemRaiseGeneralProtectionFault0(pVCpu); \
11289 } while (0)
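/*
 * Illustrative sketch only (not compiled): the raise-check macros above are
 * placed at the top of a microcode block, before any guest state is touched.
 * Here an RDFSBASE-like body using only macros defined in this section;
 * X86_SREG_FS and X86_GREG_xAX are the usual x86.h constants.
 */
#if 0
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(uint64_t, u64Base);
IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT();
IEM_MC_FETCH_SREG_BASE_U64(u64Base, X86_SREG_FS);
IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Base);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif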
11290
11291
11292#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11293#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11294#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11295#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11296#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11297#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11298#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11299 uint32_t a_Name; \
11300 uint32_t *a_pName = &a_Name
11301#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11302 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
11303
11304#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11305#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11306
11307#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11308#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11309#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11310#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11311#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11312#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11313#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11314#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11315#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11316#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11317#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11318#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11319#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11320#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11321#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11322#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11323#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11324#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11325#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11326#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11327#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg));
11328#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg));
11329#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11330#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11331#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11332#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11333#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11334#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11335#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11336#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11337#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11338/** @note Not for IOPL or IF testing or modification. */
11339#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11340#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11341#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
11342#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
11343
11344#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11345#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11346#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11347#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11348#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11349#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11350#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11351#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11352#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11353#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11354#define IEM_MC_STORE_SREG_BASE_U64(a_iSeg, a_u64Value) *iemSRegBaseRefU64(pVCpu, (a_iSeg)) = (a_u64Value)
11355#define IEM_MC_STORE_SREG_BASE_U32(a_iSeg, a_u32Value) *iemSRegBaseRefU64(pVCpu, (a_iSeg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11356#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11357 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11358
11359
11360#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11361#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11362/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11363 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11364#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11365#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11366/** @note Not for IOPL or IF testing or modification. */
11367#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11368
11369#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11370#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11371#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11372 do { \
11373 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11374 *pu32Reg += (a_u32Value); \
11375 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11376 } while (0)
11377#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11378
11379#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11380#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11381#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11382 do { \
11383 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11384 *pu32Reg -= (a_u32Value); \
11385 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11386 } while (0)
11387#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11388#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11389
11390#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11391#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11392#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11393#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11394#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11395#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11396#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11397
11398#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11399#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11400#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11401#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11402
11403#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11404#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11405#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11406
11407#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11408#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11409#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11410
11411#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11412#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11413#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11414
11415#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11416#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11417#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11418
11419#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11420
11421#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11422
11423#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11424#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11425#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11426 do { \
11427 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11428 *pu32Reg &= (a_u32Value); \
11429 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11430 } while (0)
11431#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11432
11433#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11434#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11435#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11436 do { \
11437 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11438 *pu32Reg |= (a_u32Value); \
11439 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11440 } while (0)
11441#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11442
11443
11444/** @note Not for IOPL or IF modification. */
11445#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
11446/** @note Not for IOPL or IF modification. */
11447#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
11448/** @note Not for IOPL or IF modification. */
11449#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
11450
11451#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11452
11453/** Switches the FPU state to MMX mode (FSW.TOS=0, all tags valid, i.e. abridged FTW=0xff) if necessary. */
11454#define IEM_MC_FPU_TO_MMX_MODE() do { \
11455 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11456 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0xff; \
11457 } while (0)
11458
11459/** Switches the FPU state out of MMX mode, marking all registers empty (abridged FTW=0, i.e. full tag word 0xffff). */
11460#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11461 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0; \
11462 } while (0)
11463
11464#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11465 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11466#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11467 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11468#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11469 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11470 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11471 } while (0)
11472#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11473 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11474 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11475 } while (0)
11476#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11477 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11478#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11479 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11480#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11481 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
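/*
 * Illustrative sketch only (not compiled): a MOVQ mm,mm style body combining
 * the MMX raise check, the MMX-mode switch and the MREG accessors above.
 * Register indices are hardcoded for brevity; real code takes them from the
 * ModR/M byte and may also actualize the guest FPU state first.
 */
#if 0
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(uint64_t, u64Tmp);
IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
IEM_MC_FPU_TO_MMX_MODE();
IEM_MC_FETCH_MREG_U64(u64Tmp, 1 /* mm1 */);
IEM_MC_STORE_MREG_U64(0 /* mm0 */, u64Tmp);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif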
11482
11483#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11484 do { (a_u128Value).au64[0] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11485 (a_u128Value).au64[1] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11486 } while (0)
11487#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11488 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11489#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11490 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11491#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11492 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11493#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11494 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11495 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11496 } while (0)
11497#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11498 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11499#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11500 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11501 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11502 } while (0)
11503#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11504 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11505#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11506 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11507 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11508 } while (0)
11509#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11510 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11511#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11512 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11513#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11514 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11515#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11516 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11517#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11518 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11519 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11520 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11521 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11522 } while (0)
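/*
 * Illustrative sketch only (not compiled): a MOVQ xmm,xmm style body using the
 * SSE2 raise check and the XREG accessors above; note how the _ZX_U128 store
 * clears the upper half of the destination register.  Real bodies also
 * actualize the guest SSE state and decode the register indices.
 */
#if 0
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(uint64_t, u64Tmp);
IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
IEM_MC_FETCH_XREG_U64(u64Tmp, 1 /* xmm1 */);
IEM_MC_STORE_XREG_U64_ZX_U128(0 /* xmm0 */, u64Tmp);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif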
11523
11524#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11525 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11526 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11527 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11528 } while (0)
11529#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11530 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11531 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11532 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11533 } while (0)
11534#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11535 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11536 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11537 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11538 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11539 } while (0)
11540#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11541 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11542 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11543 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11544 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11545 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11546 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11547 } while (0)
11548
11549#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11550#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11551 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11552 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11553 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11554 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11555 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11556 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11557 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11558 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11559 } while (0)
11560#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11561 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11562 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11563 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11564 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11565 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11566 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11567 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11568 } while (0)
11569#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11570 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11571 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11572 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11573 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11574 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11575 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11576 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11577 } while (0)
11578#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11579 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11580 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11581 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11582 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11583 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11584 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11585 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11586 } while (0)
11587
11588#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11589 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11590#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11591 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11592#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11593 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11594#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11595 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11596 uintptr_t const iYRegTmp = (a_iYReg); \
11597 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11598 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11599 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11600 } while (0)
11601
11602#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11603 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11604 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11605 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11606 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11607 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11608 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11609 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11610 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11611 } while (0)
11612#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11613 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11614 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11615 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11616 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11617 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11618 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11619 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11620 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11621 } while (0)
11622#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11623 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11624 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11625 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11626 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11627 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11628 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11629 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11630 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11631 } while (0)
11632
11633#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11634 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11635 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11636 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11637 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11638 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11639 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11640 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11641 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11642 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11643 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11644 } while (0)
11645#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11646 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11647 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11648 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11649 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11650 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11651 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11652 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11653 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11654 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11655 } while (0)
11656#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11657 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11658 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11659 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11660 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11661 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11662 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11663 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11664 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11665 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11666 } while (0)
11667#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11668 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11669 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11670 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11671 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11672 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11673 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11674 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11675 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11676 } while (0)
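/*
 * Illustrative sketch only (not compiled): a VMOVQ xmm,xmm style body.  The
 * _ZX_VLMAX store above implements the VEX rule that a write to an XMM
 * register zeroes the bits above the vector length, which is why bits 127:64
 * and the whole YmmHi half of the destination end up cleared here.
 */
#if 0
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(uint64_t, u64Tmp);
IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT();
IEM_MC_FETCH_YREG_U64(u64Tmp, 1 /* ymm1 */);
IEM_MC_STORE_YREG_U64_ZX_VLMAX(0 /* ymm0 */, u64Tmp);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif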
11677
11678#ifndef IEM_WITH_SETJMP
11679# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11680 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11681# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11682 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11683# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11684 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11685#else
11686# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11687 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11688# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11689 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11690# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11691 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11692#endif
11693
11694#ifndef IEM_WITH_SETJMP
11695# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11696 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11697# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11698 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11699# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11700 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11701#else
11702# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11703 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11704# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11705 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11706# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11707 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11708#endif
11709
11710#ifndef IEM_WITH_SETJMP
11711# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11712 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11713# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11714 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11715# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11716 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11717#else
11718# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11719 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11720# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11721 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11722# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11723 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11724#endif
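/*
 * Illustrative sketch only (not compiled): a memory-form load body using the
 * fetch macros above.  Depending on IEM_WITH_SETJMP the fetch either returns a
 * strict status code or longjmps on failure, which is why the macro hides the
 * call shape.  IEM_MC_CALC_RM_EFF_ADDR, the bRm decoder value and
 * pVCpu->iem.s.iEffSeg are assumed from the surrounding decoder templates.
 */
#if 0
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);
IEM_MC_LOCAL(uint32_t, u32Value);
IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEM_MC_FETCH_MEM_U32(u32Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Value);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif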
11725
11726#ifdef SOME_UNUSED_FUNCTION
11727# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11728 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11729#endif
11730
11731#ifndef IEM_WITH_SETJMP
11732# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11733 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11734# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11735 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11736# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11737 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11738# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11739 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11740#else
11741# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11742 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11743# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11744 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11745# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11746 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11747# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11748 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11749#endif
11750
11751#ifndef IEM_WITH_SETJMP
11752# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11753 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11754# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11755 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11756# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11757 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11758#else
11759# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11760 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11761# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11762 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11763# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11764 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11765#endif
11766
11767#ifndef IEM_WITH_SETJMP
11768# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11769 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11770# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11771 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11772#else
11773# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11774 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11775# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11776 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11777#endif
11778
11779#ifndef IEM_WITH_SETJMP
11780# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11781 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11782# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11783 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11784#else
11785# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11786 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11787# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11788 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11789#endif
11790
11791
11792
11793#ifndef IEM_WITH_SETJMP
11794# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11795 do { \
11796 uint8_t u8Tmp; \
11797 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11798 (a_u16Dst) = u8Tmp; \
11799 } while (0)
11800# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11801 do { \
11802 uint8_t u8Tmp; \
11803 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11804 (a_u32Dst) = u8Tmp; \
11805 } while (0)
11806# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11807 do { \
11808 uint8_t u8Tmp; \
11809 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11810 (a_u64Dst) = u8Tmp; \
11811 } while (0)
11812# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11813 do { \
11814 uint16_t u16Tmp; \
11815 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11816 (a_u32Dst) = u16Tmp; \
11817 } while (0)
11818# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11819 do { \
11820 uint16_t u16Tmp; \
11821 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11822 (a_u64Dst) = u16Tmp; \
11823 } while (0)
11824# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11825 do { \
11826 uint32_t u32Tmp; \
11827 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11828 (a_u64Dst) = u32Tmp; \
11829 } while (0)
11830#else /* IEM_WITH_SETJMP */
11831# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11832 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11833# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11834 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11835# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11836 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11837# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11838 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11839# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11840 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11841# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11842 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11843#endif /* IEM_WITH_SETJMP */
11844
11845#ifndef IEM_WITH_SETJMP
11846# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11847 do { \
11848 uint8_t u8Tmp; \
11849 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11850 (a_u16Dst) = (int8_t)u8Tmp; \
11851 } while (0)
11852# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11853 do { \
11854 uint8_t u8Tmp; \
11855 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11856 (a_u32Dst) = (int8_t)u8Tmp; \
11857 } while (0)
11858# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11859 do { \
11860 uint8_t u8Tmp; \
11861 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11862 (a_u64Dst) = (int8_t)u8Tmp; \
11863 } while (0)
11864# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11865 do { \
11866 uint16_t u16Tmp; \
11867 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11868 (a_u32Dst) = (int16_t)u16Tmp; \
11869 } while (0)
11870# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11871 do { \
11872 uint16_t u16Tmp; \
11873 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11874 (a_u64Dst) = (int16_t)u16Tmp; \
11875 } while (0)
11876# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11877 do { \
11878 uint32_t u32Tmp; \
11879 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11880 (a_u64Dst) = (int32_t)u32Tmp; \
11881 } while (0)
11882#else /* IEM_WITH_SETJMP */
11883# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11884 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11885# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11886 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11887# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11888 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11889# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11890 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11891# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11892 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11893# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11894 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11895#endif /* IEM_WITH_SETJMP */
11896
11897#ifndef IEM_WITH_SETJMP
11898# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11899 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11900# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11901 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11902# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11903 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11904# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11905 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11906#else
11907# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11908 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11909# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11910 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11911# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11912 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11913# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11914 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11915#endif
11916
11917#ifndef IEM_WITH_SETJMP
11918# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11919 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11920# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11921 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11922# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11923 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11924# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11925 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11926#else
11927# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11928 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11929# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11930 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11931# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11932 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11933# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11934 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11935#endif
11936
11937#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11938#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11939#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11940#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11941#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11942#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11943#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11944 do { \
11945 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11946 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11947 } while (0)
11948
11949#ifndef IEM_WITH_SETJMP
11950# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11951 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11952# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11953 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11954#else
11955# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11956 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11957# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11958 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11959#endif
11960
11961#ifndef IEM_WITH_SETJMP
11962# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11963 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11964# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11965 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11966#else
11967# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11968 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11969# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11970 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11971#endif
11972
11973
11974#define IEM_MC_PUSH_U16(a_u16Value) \
11975 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11976#define IEM_MC_PUSH_U32(a_u32Value) \
11977 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11978#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11979 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11980#define IEM_MC_PUSH_U64(a_u64Value) \
11981 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11982
11983#define IEM_MC_POP_U16(a_pu16Value) \
11984 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11985#define IEM_MC_POP_U32(a_pu32Value) \
11986 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11987#define IEM_MC_POP_U64(a_pu64Value) \
11988 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11989
11990/** Maps guest memory for direct or bounce buffered access.
11991 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11992 * @remarks May return.
11993 */
11994#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11995 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11996
11997/** Maps guest memory for direct or bounce buffered access.
11998 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11999 * @remarks May return.
12000 */
12001#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
12002 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
12003
12004/** Commits the memory and unmaps the guest memory.
12005 * @remarks May return.
12006 */
12007#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
12008 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
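/* Illustrative usage sketch (not from this file; operand names such as pu16Dst, u16Src,
 * pEFlags and GCPtrEffDst are assumed to come from IEM_MC_ARG/IEM_MC_LOCAL declarations
 * and the effective-address calculation earlier in the IEM_MC_BEGIN block): a
 * read-modify-write memory operand is typically mapped, operated on and then committed:
 *
 *     IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *     IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
 *     IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 */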
12009
12010/** Commits the memory and unmaps the guest memory, unless the FPU status word
12011 * (@a a_u16FSW) together with the FPU control word indicates a pending unmasked
12012 * exception that would cause the store not to happen.
12013 *
12014 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
12015 * store, while \#P will not.
12016 *
12017 * @remarks May in theory return - for now.
12018 */
12019#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
12020 do { \
12021 if ( !(a_u16FSW & X86_FSW_ES) \
12022 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
12023 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
12024 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
12025 } while (0)
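/* Minimal usage sketch (hypothetical locals u16Fsw, pr32Dst and GCPtrEffDst): an
 * FST/FSTP-style implementation lets the assembly worker fill in the FSW and the
 * mapped destination, then commits conditionally and updates the FPU state:
 *
 *     IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr32Dst, IEM_ACCESS_DATA_W, u16Fsw);
 *     IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
 */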
12026
12027/** Calculates the effective address from the ModR/M byte. */
12028#ifndef IEM_WITH_SETJMP
12029# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12030 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
12031#else
12032# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12033 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
12034#endif
12035
12036#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
12037#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
12038#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
12039#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
12040#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
12041#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
12042#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
12043
12044/**
12045 * Defers the rest of the instruction emulation to a C implementation routine
12046 * and returns, only taking the standard parameters.
12047 *
12048 * @param a_pfnCImpl The pointer to the C routine.
12049 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12050 */
12051#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12052
12053/**
12054 * Defers the rest of the instruction emulation to a C implementation routine and
12055 * returns, taking one argument in addition to the standard ones.
12056 *
12057 * @param a_pfnCImpl The pointer to the C routine.
12058 * @param a0 The argument.
12059 */
12060#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12061
12062/**
12063 * Defers the rest of the instruction emulation to a C implementation routine
12064 * and returns, taking two arguments in addition to the standard ones.
12065 *
12066 * @param a_pfnCImpl The pointer to the C routine.
12067 * @param a0 The first extra argument.
12068 * @param a1 The second extra argument.
12069 */
12070#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12071
12072/**
12073 * Defers the rest of the instruction emulation to a C implementation routine
12074 * and returns, taking three arguments in addition to the standard ones.
12075 *
12076 * @param a_pfnCImpl The pointer to the C routine.
12077 * @param a0 The first extra argument.
12078 * @param a1 The second extra argument.
12079 * @param a2 The third extra argument.
12080 */
12081#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12082
12083/**
12084 * Defers the rest of the instruction emulation to a C implementation routine
12085 * and returns, taking four arguments in addition to the standard ones.
12086 *
12087 * @param a_pfnCImpl The pointer to the C routine.
12088 * @param a0 The first extra argument.
12089 * @param a1 The second extra argument.
12090 * @param a2 The third extra argument.
12091 * @param a3 The fourth extra argument.
12092 */
12093#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12094
12095/**
12096 * Defers the rest of the instruction emulation to a C implementation routine
12097 * and returns, taking five arguments in addition to the standard ones.
12098 *
12099 * @param a_pfnCImpl The pointer to the C routine.
12100 * @param a0 The first extra argument.
12101 * @param a1 The second extra argument.
12102 * @param a2 The third extra argument.
12103 * @param a3 The fourth extra argument.
12104 * @param a4 The fifth extra argument.
12105 */
12106#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
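/* Usage sketch (iemCImpl_SomeWorker and the argument names are hypothetical): inside an
 * IEM_MC_BEGIN/IEM_MC_END block the decoder simply writes
 *
 *     IEM_MC_CALL_CIMPL_2(iemCImpl_SomeWorker, uSel, offSeg);
 *
 * the return statement is part of the macro, so the worker's status is propagated
 * directly to the caller. */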
12107
12108/**
12109 * Defers the entire instruction emulation to a C implementation routine and
12110 * returns, only taking the standard parameters.
12111 *
12112 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12113 *
12114 * @param a_pfnCImpl The pointer to the C routine.
12115 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12116 */
12117#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12118
12119/**
12120 * Defers the entire instruction emulation to a C implementation routine and
12121 * returns, taking one argument in addition to the standard ones.
12122 *
12123 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12124 *
12125 * @param a_pfnCImpl The pointer to the C routine.
12126 * @param a0 The argument.
12127 */
12128#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12129
12130/**
12131 * Defers the entire instruction emulation to a C implementation routine and
12132 * returns, taking two arguments in addition to the standard ones.
12133 *
12134 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12135 *
12136 * @param a_pfnCImpl The pointer to the C routine.
12137 * @param a0 The first extra argument.
12138 * @param a1 The second extra argument.
12139 */
12140#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12141
12142/**
12143 * Defers the entire instruction emulation to a C implementation routine and
12144 * returns, taking three arguments in addition to the standard ones.
12145 *
12146 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12147 *
12148 * @param a_pfnCImpl The pointer to the C routine.
12149 * @param a0 The first extra argument.
12150 * @param a1 The second extra argument.
12151 * @param a2 The third extra argument.
12152 */
12153#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
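/* Usage sketch (iemCImpl_SomeSimpleWorker is a hypothetical worker name): for
 * instructions that need no IEM_MC_* state at all, the decoder body is just
 *
 *     return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_SomeSimpleWorker);
 */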
12154
12155/**
12156 * Calls a FPU assembly implementation taking one visible argument.
12157 *
12158 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12159 * @param a0 The first extra argument.
12160 */
12161#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12162 do { \
12163 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
12164 } while (0)
12165
12166/**
12167 * Calls a FPU assembly implementation taking two visible arguments.
12168 *
12169 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12170 * @param a0 The first extra argument.
12171 * @param a1 The second extra argument.
12172 */
12173#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12174 do { \
12175 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12176 } while (0)
12177
12178/**
12179 * Calls a FPU assembly implementation taking three visible arguments.
12180 *
12181 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12182 * @param a0 The first extra argument.
12183 * @param a1 The second extra argument.
12184 * @param a2 The third extra argument.
12185 */
12186#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12187 do { \
12188 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12189 } while (0)
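/* Usage sketch (assumes FpuRes/pFpuRes and the two ST register references were set up
 * with IEM_MC_LOCAL/IEM_MC_ARG/IEM_MC_REF_* earlier in the block): a two-operand FPU
 * instruction typically calls the worker and then stores the packed result:
 *
 *     IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *     IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 */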
12190
12191#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12192 do { \
12193 (a_FpuData).FSW = (a_FSW); \
12194 (a_FpuData).r80Result = *(a_pr80Value); \
12195 } while (0)
12196
12197/** Pushes FPU result onto the stack. */
12198#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12199 iemFpuPushResult(pVCpu, &a_FpuData)
12200/** Pushes FPU result onto the stack and sets the FPUDP. */
12201#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12202 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12203
12204/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
12205#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12206 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12207
12208/** Stores FPU result in a stack register. */
12209#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12210 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12211/** Stores FPU result in a stack register and pops the stack. */
12212#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12213 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12214/** Stores FPU result in a stack register and sets the FPUDP. */
12215#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12216 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12217/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12218 * stack. */
12219#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12220 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12221
12222/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12223#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12224 iemFpuUpdateOpcodeAndIp(pVCpu)
12225/** Free a stack register (for FFREE and FFREEP). */
12226#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12227 iemFpuStackFree(pVCpu, a_iStReg)
12228/** Increment the FPU stack pointer. */
12229#define IEM_MC_FPU_STACK_INC_TOP() \
12230 iemFpuStackIncTop(pVCpu)
12231/** Decrement the FPU stack pointer. */
12232#define IEM_MC_FPU_STACK_DEC_TOP() \
12233 iemFpuStackDecTop(pVCpu)
12234
12235/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12236#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12237 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12238/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12239#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12240 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12241/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12242#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12243 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12244/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12245#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12246 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12247/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12248 * stack. */
12249#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12250 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12251/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12252#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12253 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12254
12255/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12256#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12257 iemFpuStackUnderflow(pVCpu, a_iStDst)
12258/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12259 * stack. */
12260#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12261 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12262/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12263 * FPUDS. */
12264#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12265 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12266/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12267 * FPUDS. Pops stack. */
12268#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12269 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12270/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12271 * stack twice. */
12272#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12273 iemFpuStackUnderflowThenPopPop(pVCpu)
12274/** Raises a FPU stack underflow exception for an instruction pushing a result
12275 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12276#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12277 iemFpuStackPushUnderflow(pVCpu)
12278/** Raises a FPU stack underflow exception for an instruction pushing a result
12279 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12280#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12281 iemFpuStackPushUnderflowTwo(pVCpu)
12282
12283/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12284 * FPUIP, FPUCS and FOP. */
12285#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12286 iemFpuStackPushOverflow(pVCpu)
12287/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12288 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12289#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12290 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12291/** Prepares for using the FPU state.
12292 * Ensures that we can use the host FPU in the current context (RC+R0).
12293 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12294#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12295/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
12296#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12297/** Actualizes the guest FPU state so it can be accessed and modified. */
12298#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12299
12300/** Prepares for using the SSE state.
12301 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12302 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12303#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12304/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12305#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12306/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12307#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12308
12309/** Prepares for using the AVX state.
12310 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12311 * Ensures the guest AVX state in the CPUMCTX is up to date.
12312 * @note This will include the AVX512 state too when support for it is added
12313 * due to the zero-extending behaviour of VEX-encoded instructions. */
12314#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12315/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12316#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12317/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12318#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
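/* Usage sketch: a decoder touching SSE state first raises any pending exceptions and
 * then prepares the state before reading or writing XMM registers (the *_MAYBE_RAISE_*
 * check is defined elsewhere in this file):
 *
 *     IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();
 *     IEM_MC_PREPARE_SSE_USAGE();
 */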
12319
12320/**
12321 * Calls a MMX assembly implementation taking two visible arguments.
12322 *
12323 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12324 * @param a0 The first extra argument.
12325 * @param a1 The second extra argument.
12326 */
12327#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12328 do { \
12329 IEM_MC_PREPARE_FPU_USAGE(); \
12330 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12331 } while (0)
12332
12333/**
12334 * Calls a MMX assembly implementation taking three visible arguments.
12335 *
12336 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12337 * @param a0 The first extra argument.
12338 * @param a1 The second extra argument.
12339 * @param a2 The third extra argument.
12340 */
12341#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12342 do { \
12343 IEM_MC_PREPARE_FPU_USAGE(); \
12344 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12345 } while (0)
12346
12347
12348/**
12349 * Calls a SSE assembly implementation taking two visible arguments.
12350 *
12351 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12352 * @param a0 The first extra argument.
12353 * @param a1 The second extra argument.
12354 */
12355#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12356 do { \
12357 IEM_MC_PREPARE_SSE_USAGE(); \
12358 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12359 } while (0)
12360
12361/**
12362 * Calls a SSE assembly implementation taking three visible arguments.
12363 *
12364 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12365 * @param a0 The first extra argument.
12366 * @param a1 The second extra argument.
12367 * @param a2 The third extra argument.
12368 */
12369#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12370 do { \
12371 IEM_MC_PREPARE_SSE_USAGE(); \
12372 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12373 } while (0)
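/* Usage sketch (register indices elided; pDst/pSrc assumed to come from
 * IEM_MC_REF_XREG_U128 / IEM_MC_REF_XREG_U128_CONST): the call macro takes care of
 * IEM_MC_PREPARE_SSE_USAGE() itself, so a typical body is just
 *
 *     IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_pxor_u128, pDst, pSrc);
 */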
12374
12375
12376/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12377 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12378#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12379 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState), 0)
12380
12381/**
12382 * Calls an AVX assembly implementation taking two visible arguments.
12383 *
12384 * There is one implicit zeroth argument, a pointer to the extended state.
12385 *
12386 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12387 * @param a1 The first extra argument.
12388 * @param a2 The second extra argument.
12389 */
12390#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12391 do { \
12392 IEM_MC_PREPARE_AVX_USAGE(); \
12393 a_pfnAImpl(pXState, (a1), (a2)); \
12394 } while (0)
12395
12396/**
12397 * Calls an AVX assembly implementation taking three visible arguments.
12398 *
12399 * There is one implicit zeroth argument, a pointer to the extended state.
12400 *
12401 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12402 * @param a1 The first extra argument.
12403 * @param a2 The second extra argument.
12404 * @param a3 The third extra argument.
12405 */
12406#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12407 do { \
12408 IEM_MC_PREPARE_AVX_USAGE(); \
12409 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12410 } while (0)
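/* Usage sketch (iemAImpl_SomeAvxWorker and the iYReg* argument names are hypothetical):
 * the implicit extended-state pointer is declared once and then passed automatically:
 *
 *     IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *     ...
 *     IEM_MC_CALL_AVX_AIMPL_2(iemAImpl_SomeAvxWorker, iYRegDst, iYRegSrc);
 */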
12411
12412/** @note Not for IOPL or IF testing. */
12413#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
12414/** @note Not for IOPL or IF testing. */
12415#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
12416/** @note Not for IOPL or IF testing. */
12417#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
12418/** @note Not for IOPL or IF testing. */
12419#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
12420/** @note Not for IOPL or IF testing. */
12421#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12422 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12423 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12424/** @note Not for IOPL or IF testing. */
12425#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12426 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12427 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12428/** @note Not for IOPL or IF testing. */
12429#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12430 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12431 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12432 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12433/** @note Not for IOPL or IF testing. */
12434#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12435 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12436 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12437 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12438#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
12439#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
12440#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
12441/** @note Not for IOPL or IF testing. */
12442#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12443 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12444 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12445/** @note Not for IOPL or IF testing. */
12446#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12447 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12448 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12449/** @note Not for IOPL or IF testing. */
12450#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12451 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12452 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12453/** @note Not for IOPL or IF testing. */
12454#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12455 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12456 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12457/** @note Not for IOPL or IF testing. */
12458#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12459 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12460 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12461/** @note Not for IOPL or IF testing. */
12462#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12463 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12464 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12465#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12466#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12467
12468#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12469 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12470#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12471 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12472#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12473 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12474#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12475 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12476#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12477 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12478#define IEM_MC_IF_FCW_IM() \
12479 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12480
12481#define IEM_MC_ELSE() } else {
12482#define IEM_MC_ENDIF() } do {} while (0)
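/* Usage sketch (IEM_MC_REL_JMP_S8 and IEM_MC_ADVANCE_RIP are defined elsewhere in this
 * file): the IF/ELSE/ENDIF macros carry their own braces, so a Jcc-style body reads
 *
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *         IEM_MC_REL_JMP_S8(i8Imm);
 *     IEM_MC_ELSE()
 *         IEM_MC_ADVANCE_RIP();
 *     IEM_MC_ENDIF();
 */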
12483
12484/** @} */
12485
12486
12487/** @name Opcode Debug Helpers.
12488 * @{
12489 */
12490#ifdef VBOX_WITH_STATISTICS
12491# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12492#else
12493# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12494#endif
12495
12496#ifdef DEBUG
12497# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12498 do { \
12499 IEMOP_INC_STATS(a_Stats); \
12500 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
12501 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12502 } while (0)
12503
12504# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12505 do { \
12506 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12507 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12508 (void)RT_CONCAT(OP_,a_Upper); \
12509 (void)(a_fDisHints); \
12510 (void)(a_fIemHints); \
12511 } while (0)
12512
12513# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12514 do { \
12515 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12516 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12517 (void)RT_CONCAT(OP_,a_Upper); \
12518 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12519 (void)(a_fDisHints); \
12520 (void)(a_fIemHints); \
12521 } while (0)
12522
12523# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12524 do { \
12525 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12526 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12527 (void)RT_CONCAT(OP_,a_Upper); \
12528 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12529 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12530 (void)(a_fDisHints); \
12531 (void)(a_fIemHints); \
12532 } while (0)
12533
12534# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12535 do { \
12536 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12537 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12538 (void)RT_CONCAT(OP_,a_Upper); \
12539 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12540 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12541 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12542 (void)(a_fDisHints); \
12543 (void)(a_fIemHints); \
12544 } while (0)
12545
12546# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12547 do { \
12548 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12549 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12550 (void)RT_CONCAT(OP_,a_Upper); \
12551 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12552 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12553 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12554 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12555 (void)(a_fDisHints); \
12556 (void)(a_fIemHints); \
12557 } while (0)
12558
12559#else
12560# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12561
12562# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12563 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12564# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12565 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12566# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12567 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12568# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12569 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12570# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12571 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12572
12573#endif
12574
12575#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12576 IEMOP_MNEMONIC0EX(a_Lower, \
12577 #a_Lower, \
12578 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12579#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12580 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12581 #a_Lower " " #a_Op1, \
12582 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12583#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12584 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12585 #a_Lower " " #a_Op1 "," #a_Op2, \
12586 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12587#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12588 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12589 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12590 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12591#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12592 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12593 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12594 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
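/* Usage sketch (operand forms and hint flags assumed from the disassembler tables,
 * e.g. OP_PARM_Eb/OP_PARM_Gb and DISOPTYPE_HARMLESS): a decoder announces itself with
 *
 *     IEMOP_MNEMONIC2(MR, ADD, add, Eb, Gb, DISOPTYPE_HARMLESS, 0);
 *
 * which bumps the per-instruction statistics and, in debug builds, emits the Log4
 * decode line shown above. */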
12595
12596/** @} */
12597
12598
12599/** @name Opcode Helpers.
12600 * @{
12601 */
12602
12603#ifdef IN_RING3
12604# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12605 do { \
12606 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12607 else \
12608 { \
12609 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12610 return IEMOP_RAISE_INVALID_OPCODE(); \
12611 } \
12612 } while (0)
12613#else
12614# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12615 do { \
12616 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12617 else return IEMOP_RAISE_INVALID_OPCODE(); \
12618 } while (0)
12619#endif
12620
12621/** The instruction requires a 186 or later. */
12622#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12623# define IEMOP_HLP_MIN_186() do { } while (0)
12624#else
12625# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12626#endif
12627
12628/** The instruction requires a 286 or later. */
12629#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12630# define IEMOP_HLP_MIN_286() do { } while (0)
12631#else
12632# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12633#endif
12634
12635/** The instruction requires a 386 or later. */
12636#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12637# define IEMOP_HLP_MIN_386() do { } while (0)
12638#else
12639# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12640#endif
12641
12642/** The instruction requires a 386 or later if the given expression is true. */
12643#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12644# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12645#else
12646# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12647#endif
12648
12649/** The instruction requires a 486 or later. */
12650#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12651# define IEMOP_HLP_MIN_486() do { } while (0)
12652#else
12653# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12654#endif
12655
12656/** The instruction requires a Pentium (586) or later. */
12657#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12658# define IEMOP_HLP_MIN_586() do { } while (0)
12659#else
12660# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12661#endif
12662
12663/** The instruction requires a PentiumPro (686) or later. */
12664#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12665# define IEMOP_HLP_MIN_686() do { } while (0)
12666#else
12667# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12668#endif
12669
12670
12671/** The instruction raises an \#UD in real and V8086 mode. */
12672#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12673 do \
12674 { \
12675 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12676 else return IEMOP_RAISE_INVALID_OPCODE(); \
12677 } while (0)
12678
12679/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12680 * 64-bit mode. */
12681#define IEMOP_HLP_NO_64BIT() \
12682 do \
12683 { \
12684 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12685 return IEMOP_RAISE_INVALID_OPCODE(); \
12686 } while (0)
12687
12688/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12689 * 64-bit mode. */
12690#define IEMOP_HLP_ONLY_64BIT() \
12691 do \
12692 { \
12693 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12694 return IEMOP_RAISE_INVALID_OPCODE(); \
12695 } while (0)
12696
12697/** The instruction defaults to 64-bit operand size if in 64-bit mode. */
12698#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12699 do \
12700 { \
12701 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12702 iemRecalEffOpSize64Default(pVCpu); \
12703 } while (0)
12704
12705/** The instruction has 64-bit operand size if in 64-bit mode. */
12706#define IEMOP_HLP_64BIT_OP_SIZE() \
12707 do \
12708 { \
12709 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12710 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12711 } while (0)
12712
12713/** Only a REX prefix immediately preceding the first opcode byte takes
12714 * effect. This macro helps ensure this as well as log bad guest code. */
12715#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12716 do \
12717 { \
12718 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12719 { \
12720 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
12721 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
12722 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12723 pVCpu->iem.s.uRexB = 0; \
12724 pVCpu->iem.s.uRexIndex = 0; \
12725 pVCpu->iem.s.uRexReg = 0; \
12726 iemRecalEffOpSize(pVCpu); \
12727 } \
12728 } while (0)
12729
12730/**
12731 * Done decoding.
12732 */
12733#define IEMOP_HLP_DONE_DECODING() \
12734 do \
12735 { \
12736 /*nothing for now, maybe later... */ \
12737 } while (0)
12738
12739/**
12740 * Done decoding, raise \#UD exception if lock prefix present.
12741 */
12742#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12743 do \
12744 { \
12745 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12746 { /* likely */ } \
12747 else \
12748 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12749 } while (0)
12750
12751
12752/**
12753 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12754 * repnz or size prefixes are present, or if in real or v8086 mode.
12755 */
12756#define IEMOP_HLP_DONE_VEX_DECODING() \
12757 do \
12758 { \
12759 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12760 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12761 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12762 { /* likely */ } \
12763 else \
12764 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12765 } while (0)
12766
12767/**
12768 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12769 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L != 0.
12770 */
12771#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12772 do \
12773 { \
12774 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12775 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12776 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12777 && pVCpu->iem.s.uVexLength == 0)) \
12778 { /* likely */ } \
12779 else \
12780 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12781 } while (0)
12782
12783
12784/**
12785 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12786 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12787 * register 0, or if in real or v8086 mode.
12788 */
12789#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12790 do \
12791 { \
12792 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12793 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12794 && !pVCpu->iem.s.uVex3rdReg \
12795 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12796 { /* likely */ } \
12797 else \
12798 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12799 } while (0)
12800
12801/**
12802 * Done decoding VEX, no V, L=0.
12803 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12804 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12805 */
12806#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12807 do \
12808 { \
12809 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12810 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12811 && pVCpu->iem.s.uVexLength == 0 \
12812 && pVCpu->iem.s.uVex3rdReg == 0 \
12813 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12814 { /* likely */ } \
12815 else \
12816 return IEMOP_RAISE_INVALID_OPCODE(); \
12817 } while (0)
12818
12819#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12820 do \
12821 { \
12822 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12823 { /* likely */ } \
12824 else \
12825 { \
12826 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12827 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12828 } \
12829 } while (0)
12830#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12831 do \
12832 { \
12833 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12834 { /* likely */ } \
12835 else \
12836 { \
12837 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12838 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12839 } \
12840 } while (0)
12841
12842/**
12843 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12844 * are present.
12845 */
12846#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12847 do \
12848 { \
12849 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12850 { /* likely */ } \
12851 else \
12852 return IEMOP_RAISE_INVALID_OPCODE(); \
12853 } while (0)
12854
12855
12856#ifdef VBOX_WITH_NESTED_HWVIRT
12857/** Checks for and handles an SVM nested-guest instruction intercept, updating
12858 * the NRIP if needed. */
12859# define IEMOP_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
12860 do \
12861 { \
12862 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
12863 { \
12864 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
12865 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
12866 } \
12867 } while (0)
12868
12869/** Checks for and handles an SVM nested-guest CR read intercept. */
12870# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \
12871 do \
12872 { \
12873 if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)) \
12874 { \
12875 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
12876 IEM_RETURN_SVM_VMEXIT(a_pVCpu, SVM_EXIT_READ_CR0 + (a_uCr), a_uExitInfo1, a_uExitInfo2); \
12877 } \
12878 } while (0)
12879
12880#else /* !VBOX_WITH_NESTED_HWVIRT */
12881# define IEMOP_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12882# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12883#endif /* !VBOX_WITH_NESTED_HWVIRT */
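/* Usage sketch (SVM_CTRL_INTERCEPT_RDTSC / SVM_EXIT_RDTSC taken from the SVM headers
 * as an assumed example): an interceptable instruction checks the intercept right
 * after decoding and before doing any work:
 *
 *     IEMOP_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(pVCpu, SVM_CTRL_INTERCEPT_RDTSC, SVM_EXIT_RDTSC, 0, 0);
 */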
12884
12885
12886/**
12887 * Calculates the effective address of a ModR/M memory operand.
12888 *
12889 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12890 *
12891 * @return Strict VBox status code.
12892 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12893 * @param bRm The ModRM byte.
12894 * @param cbImm The size of any immediate following the
12895 * effective address opcode bytes. Important for
12896 * RIP relative addressing.
12897 * @param pGCPtrEff Where to return the effective address.
12898 */
12899IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12900{
12901 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12902 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12903# define SET_SS_DEF() \
12904 do \
12905 { \
12906 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12907 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12908 } while (0)
12909
12910 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12911 {
12912/** @todo Check the effective address size crap! */
12913 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12914 {
12915 uint16_t u16EffAddr;
12916
12917 /* Handle the disp16 form with no registers first. */
12918 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12919 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12920 else
12921 {
12922                 /* Get the displacement. */
12923 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12924 {
12925 case 0: u16EffAddr = 0; break;
12926 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12927 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12928 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12929 }
12930
12931 /* Add the base and index registers to the disp. */
12932 switch (bRm & X86_MODRM_RM_MASK)
12933 {
12934 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12935 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12936 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12937 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12938 case 4: u16EffAddr += pCtx->si; break;
12939 case 5: u16EffAddr += pCtx->di; break;
12940 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12941 case 7: u16EffAddr += pCtx->bx; break;
12942 }
12943 }
12944
12945 *pGCPtrEff = u16EffAddr;
12946 }
12947 else
12948 {
12949 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12950 uint32_t u32EffAddr;
12951
12952 /* Handle the disp32 form with no registers first. */
12953 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12954 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12955 else
12956 {
12957 /* Get the register (or SIB) value. */
12958 switch ((bRm & X86_MODRM_RM_MASK))
12959 {
12960 case 0: u32EffAddr = pCtx->eax; break;
12961 case 1: u32EffAddr = pCtx->ecx; break;
12962 case 2: u32EffAddr = pCtx->edx; break;
12963 case 3: u32EffAddr = pCtx->ebx; break;
12964 case 4: /* SIB */
12965 {
12966 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12967
12968 /* Get the index and scale it. */
12969 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12970 {
12971 case 0: u32EffAddr = pCtx->eax; break;
12972 case 1: u32EffAddr = pCtx->ecx; break;
12973 case 2: u32EffAddr = pCtx->edx; break;
12974 case 3: u32EffAddr = pCtx->ebx; break;
12975 case 4: u32EffAddr = 0; /*none */ break;
12976 case 5: u32EffAddr = pCtx->ebp; break;
12977 case 6: u32EffAddr = pCtx->esi; break;
12978 case 7: u32EffAddr = pCtx->edi; break;
12979 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12980 }
12981 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12982
12983 /* add base */
12984 switch (bSib & X86_SIB_BASE_MASK)
12985 {
12986 case 0: u32EffAddr += pCtx->eax; break;
12987 case 1: u32EffAddr += pCtx->ecx; break;
12988 case 2: u32EffAddr += pCtx->edx; break;
12989 case 3: u32EffAddr += pCtx->ebx; break;
12990 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12991 case 5:
12992 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12993 {
12994 u32EffAddr += pCtx->ebp;
12995 SET_SS_DEF();
12996 }
12997 else
12998 {
12999 uint32_t u32Disp;
13000 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13001 u32EffAddr += u32Disp;
13002 }
13003 break;
13004 case 6: u32EffAddr += pCtx->esi; break;
13005 case 7: u32EffAddr += pCtx->edi; break;
13006 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13007 }
13008 break;
13009 }
13010 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13011 case 6: u32EffAddr = pCtx->esi; break;
13012 case 7: u32EffAddr = pCtx->edi; break;
13013 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13014 }
13015
13016 /* Get and add the displacement. */
13017 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13018 {
13019 case 0:
13020 break;
13021 case 1:
13022 {
13023 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13024 u32EffAddr += i8Disp;
13025 break;
13026 }
13027 case 2:
13028 {
13029 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13030 u32EffAddr += u32Disp;
13031 break;
13032 }
13033 default:
13034 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13035 }
13036
13037 }
13038 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13039 *pGCPtrEff = u32EffAddr;
13040 else
13041 {
13042 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13043 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13044 }
13045 }
13046 }
13047 else
13048 {
13049 uint64_t u64EffAddr;
13050
13051 /* Handle the rip+disp32 form with no registers first. */
13052 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13053 {
13054 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13055 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13056 }
13057 else
13058 {
13059 /* Get the register (or SIB) value. */
13060 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13061 {
13062 case 0: u64EffAddr = pCtx->rax; break;
13063 case 1: u64EffAddr = pCtx->rcx; break;
13064 case 2: u64EffAddr = pCtx->rdx; break;
13065 case 3: u64EffAddr = pCtx->rbx; break;
13066 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13067 case 6: u64EffAddr = pCtx->rsi; break;
13068 case 7: u64EffAddr = pCtx->rdi; break;
13069 case 8: u64EffAddr = pCtx->r8; break;
13070 case 9: u64EffAddr = pCtx->r9; break;
13071 case 10: u64EffAddr = pCtx->r10; break;
13072 case 11: u64EffAddr = pCtx->r11; break;
13073 case 13: u64EffAddr = pCtx->r13; break;
13074 case 14: u64EffAddr = pCtx->r14; break;
13075 case 15: u64EffAddr = pCtx->r15; break;
13076 /* SIB */
13077 case 4:
13078 case 12:
13079 {
13080 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13081
13082 /* Get the index and scale it. */
13083 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13084 {
13085 case 0: u64EffAddr = pCtx->rax; break;
13086 case 1: u64EffAddr = pCtx->rcx; break;
13087 case 2: u64EffAddr = pCtx->rdx; break;
13088 case 3: u64EffAddr = pCtx->rbx; break;
13089 case 4: u64EffAddr = 0; /*none */ break;
13090 case 5: u64EffAddr = pCtx->rbp; break;
13091 case 6: u64EffAddr = pCtx->rsi; break;
13092 case 7: u64EffAddr = pCtx->rdi; break;
13093 case 8: u64EffAddr = pCtx->r8; break;
13094 case 9: u64EffAddr = pCtx->r9; break;
13095 case 10: u64EffAddr = pCtx->r10; break;
13096 case 11: u64EffAddr = pCtx->r11; break;
13097 case 12: u64EffAddr = pCtx->r12; break;
13098 case 13: u64EffAddr = pCtx->r13; break;
13099 case 14: u64EffAddr = pCtx->r14; break;
13100 case 15: u64EffAddr = pCtx->r15; break;
13101 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13102 }
13103 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13104
13105 /* add base */
13106 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13107 {
13108 case 0: u64EffAddr += pCtx->rax; break;
13109 case 1: u64EffAddr += pCtx->rcx; break;
13110 case 2: u64EffAddr += pCtx->rdx; break;
13111 case 3: u64EffAddr += pCtx->rbx; break;
13112 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13113 case 6: u64EffAddr += pCtx->rsi; break;
13114 case 7: u64EffAddr += pCtx->rdi; break;
13115 case 8: u64EffAddr += pCtx->r8; break;
13116 case 9: u64EffAddr += pCtx->r9; break;
13117 case 10: u64EffAddr += pCtx->r10; break;
13118 case 11: u64EffAddr += pCtx->r11; break;
13119 case 12: u64EffAddr += pCtx->r12; break;
13120 case 14: u64EffAddr += pCtx->r14; break;
13121 case 15: u64EffAddr += pCtx->r15; break;
13122 /* complicated encodings */
13123 case 5:
13124 case 13:
13125 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13126 {
13127 if (!pVCpu->iem.s.uRexB)
13128 {
13129 u64EffAddr += pCtx->rbp;
13130 SET_SS_DEF();
13131 }
13132 else
13133 u64EffAddr += pCtx->r13;
13134 }
13135 else
13136 {
13137 uint32_t u32Disp;
13138 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13139 u64EffAddr += (int32_t)u32Disp;
13140 }
13141 break;
13142 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13143 }
13144 break;
13145 }
13146 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13147 }
13148
13149 /* Get and add the displacement. */
13150 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13151 {
13152 case 0:
13153 break;
13154 case 1:
13155 {
13156 int8_t i8Disp;
13157 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13158 u64EffAddr += i8Disp;
13159 break;
13160 }
13161 case 2:
13162 {
13163 uint32_t u32Disp;
13164 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13165 u64EffAddr += (int32_t)u32Disp;
13166 break;
13167 }
13168 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13169 }
13170
13171 }
13172
13173 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13174 *pGCPtrEff = u64EffAddr;
13175 else
13176 {
13177 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13178 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13179 }
13180 }
13181
13182 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13183 return VINF_SUCCESS;
13184}
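/* Worked example for the decoder above (hypothetical byte values): in 32-bit mode,
 * bRm=0x44 (mod=1, rm=4) pulls in a SIB byte; with bSib=0x8B (scale=2, index=ecx,
 * base=ebx) and a disp8 of 0x10 the routine returns
 *
 *     *pGCPtrEff = ebx + ecx * 4 + 0x10
 *
 * i.e. base + scaled index + sign-extended 8-bit displacement. */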
13185
13186
13187/**
13188 * Calculates the effective address of a ModR/M memory operand.
13189 *
13190 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13191 *
13192 * @return Strict VBox status code.
13193 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13194 * @param bRm The ModRM byte.
13195 * @param cbImm The size of any immediate following the
13196 * effective address opcode bytes. Important for
13197 * RIP relative addressing.
13198 * @param pGCPtrEff Where to return the effective address.
13199 * @param offRsp RSP displacement.
13200 */
13201IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13202{
13203    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13204 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13205# define SET_SS_DEF() \
13206 do \
13207 { \
13208 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13209 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13210 } while (0)
13211
13212 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13213 {
13214/** @todo Check the effective address size crap! */
13215 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13216 {
13217 uint16_t u16EffAddr;
13218
13219 /* Handle the disp16 form with no registers first. */
13220 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13221 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13222 else
13223 {
13224                 /* Get the displacement. */
13225 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13226 {
13227 case 0: u16EffAddr = 0; break;
13228 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13229 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13230 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13231 }
13232
13233 /* Add the base and index registers to the disp. */
13234 switch (bRm & X86_MODRM_RM_MASK)
13235 {
13236 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13237 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13238 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13239 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13240 case 4: u16EffAddr += pCtx->si; break;
13241 case 5: u16EffAddr += pCtx->di; break;
13242 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13243 case 7: u16EffAddr += pCtx->bx; break;
13244 }
13245 }
13246
13247 *pGCPtrEff = u16EffAddr;
13248 }
13249 else
13250 {
13251 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13252 uint32_t u32EffAddr;
13253
13254 /* Handle the disp32 form with no registers first. */
13255 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13256 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13257 else
13258 {
13259 /* Get the register (or SIB) value. */
13260 switch ((bRm & X86_MODRM_RM_MASK))
13261 {
13262 case 0: u32EffAddr = pCtx->eax; break;
13263 case 1: u32EffAddr = pCtx->ecx; break;
13264 case 2: u32EffAddr = pCtx->edx; break;
13265 case 3: u32EffAddr = pCtx->ebx; break;
13266 case 4: /* SIB */
13267 {
13268 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13269
13270 /* Get the index and scale it. */
13271 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13272 {
13273 case 0: u32EffAddr = pCtx->eax; break;
13274 case 1: u32EffAddr = pCtx->ecx; break;
13275 case 2: u32EffAddr = pCtx->edx; break;
13276 case 3: u32EffAddr = pCtx->ebx; break;
13277 case 4: u32EffAddr = 0; /*none */ break;
13278 case 5: u32EffAddr = pCtx->ebp; break;
13279 case 6: u32EffAddr = pCtx->esi; break;
13280 case 7: u32EffAddr = pCtx->edi; break;
13281 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13282 }
13283 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13284
13285 /* add base */
13286 switch (bSib & X86_SIB_BASE_MASK)
13287 {
13288 case 0: u32EffAddr += pCtx->eax; break;
13289 case 1: u32EffAddr += pCtx->ecx; break;
13290 case 2: u32EffAddr += pCtx->edx; break;
13291 case 3: u32EffAddr += pCtx->ebx; break;
13292 case 4:
13293 u32EffAddr += pCtx->esp + offRsp;
13294 SET_SS_DEF();
13295 break;
13296 case 5:
13297 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13298 {
13299 u32EffAddr += pCtx->ebp;
13300 SET_SS_DEF();
13301 }
13302 else
13303 {
13304 uint32_t u32Disp;
13305 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13306 u32EffAddr += u32Disp;
13307 }
13308 break;
13309 case 6: u32EffAddr += pCtx->esi; break;
13310 case 7: u32EffAddr += pCtx->edi; break;
13311 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13312 }
13313 break;
13314 }
13315 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13316 case 6: u32EffAddr = pCtx->esi; break;
13317 case 7: u32EffAddr = pCtx->edi; break;
13318 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13319 }
13320
13321 /* Get and add the displacement. */
13322 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13323 {
13324 case 0:
13325 break;
13326 case 1:
13327 {
13328 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13329 u32EffAddr += i8Disp;
13330 break;
13331 }
13332 case 2:
13333 {
13334 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13335 u32EffAddr += u32Disp;
13336 break;
13337 }
13338 default:
13339 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13340 }
13341
13342 }
13343 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13344 *pGCPtrEff = u32EffAddr;
13345 else
13346 {
13347 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13348 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13349 }
13350 }
13351 }
13352 else
13353 {
13354 uint64_t u64EffAddr;
13355
13356 /* Handle the rip+disp32 form with no registers first. */
13357 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13358 {
13359 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13360 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13361 }
13362 else
13363 {
13364 /* Get the register (or SIB) value. */
13365 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13366 {
13367 case 0: u64EffAddr = pCtx->rax; break;
13368 case 1: u64EffAddr = pCtx->rcx; break;
13369 case 2: u64EffAddr = pCtx->rdx; break;
13370 case 3: u64EffAddr = pCtx->rbx; break;
13371 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13372 case 6: u64EffAddr = pCtx->rsi; break;
13373 case 7: u64EffAddr = pCtx->rdi; break;
13374 case 8: u64EffAddr = pCtx->r8; break;
13375 case 9: u64EffAddr = pCtx->r9; break;
13376 case 10: u64EffAddr = pCtx->r10; break;
13377 case 11: u64EffAddr = pCtx->r11; break;
13378 case 13: u64EffAddr = pCtx->r13; break;
13379 case 14: u64EffAddr = pCtx->r14; break;
13380 case 15: u64EffAddr = pCtx->r15; break;
13381 /* SIB */
13382 case 4:
13383 case 12:
13384 {
13385 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13386
13387 /* Get the index and scale it. */
13388 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13389 {
13390 case 0: u64EffAddr = pCtx->rax; break;
13391 case 1: u64EffAddr = pCtx->rcx; break;
13392 case 2: u64EffAddr = pCtx->rdx; break;
13393 case 3: u64EffAddr = pCtx->rbx; break;
13394 case 4: u64EffAddr = 0; /*none */ break;
13395 case 5: u64EffAddr = pCtx->rbp; break;
13396 case 6: u64EffAddr = pCtx->rsi; break;
13397 case 7: u64EffAddr = pCtx->rdi; break;
13398 case 8: u64EffAddr = pCtx->r8; break;
13399 case 9: u64EffAddr = pCtx->r9; break;
13400 case 10: u64EffAddr = pCtx->r10; break;
13401 case 11: u64EffAddr = pCtx->r11; break;
13402 case 12: u64EffAddr = pCtx->r12; break;
13403 case 13: u64EffAddr = pCtx->r13; break;
13404 case 14: u64EffAddr = pCtx->r14; break;
13405 case 15: u64EffAddr = pCtx->r15; break;
13406 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13407 }
13408 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13409
13410 /* add base */
13411 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13412 {
13413 case 0: u64EffAddr += pCtx->rax; break;
13414 case 1: u64EffAddr += pCtx->rcx; break;
13415 case 2: u64EffAddr += pCtx->rdx; break;
13416 case 3: u64EffAddr += pCtx->rbx; break;
13417 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
13418 case 6: u64EffAddr += pCtx->rsi; break;
13419 case 7: u64EffAddr += pCtx->rdi; break;
13420 case 8: u64EffAddr += pCtx->r8; break;
13421 case 9: u64EffAddr += pCtx->r9; break;
13422 case 10: u64EffAddr += pCtx->r10; break;
13423 case 11: u64EffAddr += pCtx->r11; break;
13424 case 12: u64EffAddr += pCtx->r12; break;
13425 case 14: u64EffAddr += pCtx->r14; break;
13426 case 15: u64EffAddr += pCtx->r15; break;
13427 /* complicated encodings */
13428 case 5:
13429 case 13:
13430 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13431 {
13432 if (!pVCpu->iem.s.uRexB)
13433 {
13434 u64EffAddr += pCtx->rbp;
13435 SET_SS_DEF();
13436 }
13437 else
13438 u64EffAddr += pCtx->r13;
13439 }
13440 else
13441 {
13442 uint32_t u32Disp;
13443 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13444 u64EffAddr += (int32_t)u32Disp;
13445 }
13446 break;
13447 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13448 }
13449 break;
13450 }
13451 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13452 }
13453
13454 /* Get and add the displacement. */
13455 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13456 {
13457 case 0:
13458 break;
13459 case 1:
13460 {
13461 int8_t i8Disp;
13462 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13463 u64EffAddr += i8Disp;
13464 break;
13465 }
13466 case 2:
13467 {
13468 uint32_t u32Disp;
13469 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13470 u64EffAddr += (int32_t)u32Disp;
13471 break;
13472 }
13473 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13474 }
13475
13476 }
13477
13478 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13479 *pGCPtrEff = u64EffAddr;
13480 else
13481 {
13482 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13483 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13484 }
13485 }
13486
13487 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13488 return VINF_SUCCESS;
13489}
13490
13491
13492#ifdef IEM_WITH_SETJMP
13493/**
13494 * Calculates the effective address of a ModR/M memory operand.
13495 *
13496 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13497 *
13498 * May longjmp on internal error.
13499 *
13500 * @return The effective address.
13501 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13502 * @param bRm The ModRM byte.
13503 * @param cbImm The size of any immediate following the
13504 * effective address opcode bytes. Important for
13505 * RIP relative addressing.
13506 */
13507IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13508{
13509 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13510 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13511# define SET_SS_DEF() \
13512 do \
13513 { \
13514 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13515 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13516 } while (0)
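 /* Note: addressing forms based on xBP or xSP default to the SS segment; SET_SS_DEF
 switches the effective segment to SS unless an explicit segment prefix is present. */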
13517
13518 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13519 {
13520/** @todo Check the effective address size crap! */
13521 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13522 {
13523 uint16_t u16EffAddr;
13524
13525 /* Handle the disp16 form with no registers first. */
13526 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13527 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13528 else
13529 {
13530 /* Get the displacement. */
13531 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13532 {
13533 case 0: u16EffAddr = 0; break;
13534 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13535 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13536 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13537 }
13538
13539 /* Add the base and index registers to the disp. */
13540 switch (bRm & X86_MODRM_RM_MASK)
13541 {
13542 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13543 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13544 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13545 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13546 case 4: u16EffAddr += pCtx->si; break;
13547 case 5: u16EffAddr += pCtx->di; break;
13548 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13549 case 7: u16EffAddr += pCtx->bx; break;
13550 }
13551 }
13552
13553 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13554 return u16EffAddr;
13555 }
13556
13557 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13558 uint32_t u32EffAddr;
13559
13560 /* Handle the disp32 form with no registers first. */
13561 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13562 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13563 else
13564 {
13565 /* Get the register (or SIB) value. */
13566 switch ((bRm & X86_MODRM_RM_MASK))
13567 {
13568 case 0: u32EffAddr = pCtx->eax; break;
13569 case 1: u32EffAddr = pCtx->ecx; break;
13570 case 2: u32EffAddr = pCtx->edx; break;
13571 case 3: u32EffAddr = pCtx->ebx; break;
13572 case 4: /* SIB */
13573 {
13574 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13575
13576 /* Get the index and scale it. */
13577 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13578 {
13579 case 0: u32EffAddr = pCtx->eax; break;
13580 case 1: u32EffAddr = pCtx->ecx; break;
13581 case 2: u32EffAddr = pCtx->edx; break;
13582 case 3: u32EffAddr = pCtx->ebx; break;
13583 case 4: u32EffAddr = 0; /*none */ break;
13584 case 5: u32EffAddr = pCtx->ebp; break;
13585 case 6: u32EffAddr = pCtx->esi; break;
13586 case 7: u32EffAddr = pCtx->edi; break;
13587 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13588 }
13589 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13590
13591 /* add base */
13592 switch (bSib & X86_SIB_BASE_MASK)
13593 {
13594 case 0: u32EffAddr += pCtx->eax; break;
13595 case 1: u32EffAddr += pCtx->ecx; break;
13596 case 2: u32EffAddr += pCtx->edx; break;
13597 case 3: u32EffAddr += pCtx->ebx; break;
13598 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
13599 case 5:
13600 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13601 {
13602 u32EffAddr += pCtx->ebp;
13603 SET_SS_DEF();
13604 }
13605 else
13606 {
13607 uint32_t u32Disp;
13608 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13609 u32EffAddr += u32Disp;
13610 }
13611 break;
13612 case 6: u32EffAddr += pCtx->esi; break;
13613 case 7: u32EffAddr += pCtx->edi; break;
13614 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13615 }
13616 break;
13617 }
13618 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13619 case 6: u32EffAddr = pCtx->esi; break;
13620 case 7: u32EffAddr = pCtx->edi; break;
13621 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13622 }
13623
13624 /* Get and add the displacement. */
13625 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13626 {
13627 case 0:
13628 break;
13629 case 1:
13630 {
13631 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13632 u32EffAddr += i8Disp;
13633 break;
13634 }
13635 case 2:
13636 {
13637 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13638 u32EffAddr += u32Disp;
13639 break;
13640 }
13641 default:
13642 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13643 }
13644 }
13645
13646 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13647 {
13648 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13649 return u32EffAddr;
13650 }
13651 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13652 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13653 return u32EffAddr & UINT16_MAX;
13654 }
13655
13656 uint64_t u64EffAddr;
13657
13658 /* Handle the rip+disp32 form with no registers first. */
13659 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13660 {
13661 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13662 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13663 }
13664 else
13665 {
13666 /* Get the register (or SIB) value. */
13667 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13668 {
13669 case 0: u64EffAddr = pCtx->rax; break;
13670 case 1: u64EffAddr = pCtx->rcx; break;
13671 case 2: u64EffAddr = pCtx->rdx; break;
13672 case 3: u64EffAddr = pCtx->rbx; break;
13673 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13674 case 6: u64EffAddr = pCtx->rsi; break;
13675 case 7: u64EffAddr = pCtx->rdi; break;
13676 case 8: u64EffAddr = pCtx->r8; break;
13677 case 9: u64EffAddr = pCtx->r9; break;
13678 case 10: u64EffAddr = pCtx->r10; break;
13679 case 11: u64EffAddr = pCtx->r11; break;
13680 case 13: u64EffAddr = pCtx->r13; break;
13681 case 14: u64EffAddr = pCtx->r14; break;
13682 case 15: u64EffAddr = pCtx->r15; break;
13683 /* SIB */
13684 case 4:
13685 case 12:
13686 {
13687 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13688
13689 /* Get the index and scale it. */
13690 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13691 {
13692 case 0: u64EffAddr = pCtx->rax; break;
13693 case 1: u64EffAddr = pCtx->rcx; break;
13694 case 2: u64EffAddr = pCtx->rdx; break;
13695 case 3: u64EffAddr = pCtx->rbx; break;
13696 case 4: u64EffAddr = 0; /*none */ break;
13697 case 5: u64EffAddr = pCtx->rbp; break;
13698 case 6: u64EffAddr = pCtx->rsi; break;
13699 case 7: u64EffAddr = pCtx->rdi; break;
13700 case 8: u64EffAddr = pCtx->r8; break;
13701 case 9: u64EffAddr = pCtx->r9; break;
13702 case 10: u64EffAddr = pCtx->r10; break;
13703 case 11: u64EffAddr = pCtx->r11; break;
13704 case 12: u64EffAddr = pCtx->r12; break;
13705 case 13: u64EffAddr = pCtx->r13; break;
13706 case 14: u64EffAddr = pCtx->r14; break;
13707 case 15: u64EffAddr = pCtx->r15; break;
13708 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13709 }
13710 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13711
13712 /* add base */
13713 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13714 {
13715 case 0: u64EffAddr += pCtx->rax; break;
13716 case 1: u64EffAddr += pCtx->rcx; break;
13717 case 2: u64EffAddr += pCtx->rdx; break;
13718 case 3: u64EffAddr += pCtx->rbx; break;
13719 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13720 case 6: u64EffAddr += pCtx->rsi; break;
13721 case 7: u64EffAddr += pCtx->rdi; break;
13722 case 8: u64EffAddr += pCtx->r8; break;
13723 case 9: u64EffAddr += pCtx->r9; break;
13724 case 10: u64EffAddr += pCtx->r10; break;
13725 case 11: u64EffAddr += pCtx->r11; break;
13726 case 12: u64EffAddr += pCtx->r12; break;
13727 case 14: u64EffAddr += pCtx->r14; break;
13728 case 15: u64EffAddr += pCtx->r15; break;
13729 /* complicated encodings */
13730 case 5:
13731 case 13:
13732 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13733 {
13734 if (!pVCpu->iem.s.uRexB)
13735 {
13736 u64EffAddr += pCtx->rbp;
13737 SET_SS_DEF();
13738 }
13739 else
13740 u64EffAddr += pCtx->r13;
13741 }
13742 else
13743 {
13744 uint32_t u32Disp;
13745 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13746 u64EffAddr += (int32_t)u32Disp;
13747 }
13748 break;
13749 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13750 }
13751 break;
13752 }
13753 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13754 }
13755
13756 /* Get and add the displacement. */
13757 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13758 {
13759 case 0:
13760 break;
13761 case 1:
13762 {
13763 int8_t i8Disp;
13764 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13765 u64EffAddr += i8Disp;
13766 break;
13767 }
13768 case 2:
13769 {
13770 uint32_t u32Disp;
13771 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13772 u64EffAddr += (int32_t)u32Disp;
13773 break;
13774 }
13775 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13776 }
13777
13778 }
13779
13780 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13781 {
13782 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13783 return u64EffAddr;
13784 }
13785 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13786 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13787 return u64EffAddr & UINT32_MAX;
13788}
13789#endif /* IEM_WITH_SETJMP */
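
/*
 * Worked decoding example (informational only, not used by the code above):
 *
 *      8B 44 B3 10             mov eax, [ebx+esi*4+0x10]
 *
 * The ModR/M byte 0x44 gives mod=1, reg=0, rm=4, so a SIB byte (0xB3: scale=4,
 * index=esi, base=ebx) and a disp8 follow; the helpers above compute
 * ebx + esi*4 + 0x10 and leave the default segment at DS (SS is only used for
 * the xBP/xSP based forms).
 *
 * In 64-bit mode the mod=0, rm=5 encoding is RIP-relative:
 *
 *      8B 05 44 33 22 11       mov eax, [rip+0x11223344]
 *
 * The effective address is the RIP of the next instruction plus the displacement,
 * which is why cbImm must be supplied for forms that still carry an immediate
 * after the displacement (e.g. C7 05 <disp32> <imm32>).
 */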
13790
13791
13792/** @} */
13793
13794
13795
13796/*
13797 * Include the instructions
13798 */
13799#include "IEMAllInstructions.cpp.h"
13800
13801
13802
13803
13804#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13805
13806/**
13807 * Sets up execution verification mode.
13808 */
13809IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
13810{
13812 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
13813
13814 /*
13815 * Always note down the address of the current instruction.
13816 */
13817 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
13818 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
13819
13820 /*
13821 * Enable verification and/or logging.
13822 */
13823 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
13824 if ( fNewNoRem
13825 && ( 0
13826#if 0 /* auto enable on first paged protected mode interrupt */
13827 || ( pOrgCtx->eflags.Bits.u1IF
13828 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
13829 && TRPMHasTrap(pVCpu)
13830 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip))
13831#endif
13832#if 0
13833 || ( pOrgCtx->cs.Sel == 0x10
13834 && ( pOrgCtx->rip == 0x90119e3e
13835 || pOrgCtx->rip == 0x901d9810))
13836#endif
13837#if 0 /* Auto enable DSL - FPU stuff. */
13838 || ( pOrgCtx->cs.Sel == 0x10
13839 && (// pOrgCtx->rip == 0xc02ec07f
13840 //|| pOrgCtx->rip == 0xc02ec082
13841 //|| pOrgCtx->rip == 0xc02ec0c9
13842 0
13843 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
13844#endif
13845#if 0 /* Auto enable DSL - fstp st0 stuff. */
13846 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
13847#endif
13848#if 0
13849 || pOrgCtx->rip == 0x9022bb3a
13850#endif
13851#if 0
13852 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
13853#endif
13854#if 0
13855 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
13856 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
13857#endif
13858#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
13859 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
13860 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
13861 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
13862#endif
13863#if 0 /* NT4SP1 - xadd early boot. */
13864 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
13865#endif
13866#if 0 /* NT4SP1 - wrmsr (intel MSR). */
13867 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
13868#endif
13869#if 0 /* NT4SP1 - cmpxchg (AMD). */
13870 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
13871#endif
13872#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
13873 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
13874#endif
13875#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
13876 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
13877
13878#endif
13879#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
13880 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
13881
13882#endif
13883#if 0 /* NT4SP1 - frstor [ecx] */
13884 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
13885#endif
13886#if 0 /* xxxxxx - All long mode code. */
13887 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
13888#endif
13889#if 0 /* rep movsq linux 3.7 64-bit boot. */
13890 || (pOrgCtx->rip == 0x0000000000100241)
13891#endif
13892#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
13893 || (pOrgCtx->rip == 0x000000000215e240)
13894#endif
13895#if 0 /* DOS's size-overridden iret to v8086. */
13896 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
13897#endif
13898 )
13899 )
13900 {
13901 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
13902 RTLogFlags(NULL, "enabled");
13903 fNewNoRem = false;
13904 }
13905 if (fNewNoRem != pVCpu->iem.s.fNoRem)
13906 {
13907 pVCpu->iem.s.fNoRem = fNewNoRem;
13908 if (!fNewNoRem)
13909 {
13910 LogAlways(("Enabling verification mode!\n"));
13911 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
13912 }
13913 else
13914 LogAlways(("Disabling verification mode!\n"));
13915 }
13916
13917 /*
13918 * Switch state.
13919 */
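 * (IEM executes on a static copy of the guest context so that the reference
 * engine, HM or REM, can later run the same instruction on the original
 * context for comparison; see iemExecVerificationModeCheck.)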
13920 if (IEM_VERIFICATION_ENABLED(pVCpu))
13921 {
13922 static CPUMCTX s_DebugCtx; /* Ugly! */
13923
13924 s_DebugCtx = *pOrgCtx;
13925 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
13926 }
13927
13928 /*
13929 * See if there is an interrupt pending in TRPM and inject it if we can.
13930 */
13931 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13932 /** @todo Maybe someday we can centralize this under CPUMCanInjectInterrupt()? */
13933#if defined(VBOX_WITH_NESTED_HWVIRT)
13934 bool fIntrEnabled = pOrgCtx->hwvirt.Gif;
13935 if (fIntrEnabled)
13936 {
13937 if (CPUMIsGuestInSvmNestedHwVirtMode(pOrgCtx))
13938 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pOrgCtx);
13939 else
13940 fIntrEnabled = pOrgCtx->eflags.Bits.u1IF;
13941 }
13942#else
13943 bool fIntrEnabled = pOrgCtx->eflags.Bits.u1IF;
13944#endif
13945 if ( fIntrEnabled
13946 && TRPMHasTrap(pVCpu)
13947 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
13948 {
13949 uint8_t u8TrapNo;
13950 TRPMEVENT enmType;
13951 RTGCUINT uErrCode;
13952 RTGCPTR uCr2;
13953 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13954 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13955 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13956 TRPMResetTrap(pVCpu);
13957 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
13958 }
13959
13960 /*
13961 * Reset the counters.
13962 */
13963 pVCpu->iem.s.cIOReads = 0;
13964 pVCpu->iem.s.cIOWrites = 0;
13965 pVCpu->iem.s.fIgnoreRaxRdx = false;
13966 pVCpu->iem.s.fOverlappingMovs = false;
13967 pVCpu->iem.s.fProblematicMemory = false;
13968 pVCpu->iem.s.fUndefinedEFlags = 0;
13969
13970 if (IEM_VERIFICATION_ENABLED(pVCpu))
13971 {
13972 /*
13973 * Free all verification records.
13974 */
13975 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
13976 pVCpu->iem.s.pIemEvtRecHead = NULL;
13977 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
13978 do
13979 {
13980 while (pEvtRec)
13981 {
13982 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
13983 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
13984 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
13985 pEvtRec = pNext;
13986 }
13987 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
13988 pVCpu->iem.s.pOtherEvtRecHead = NULL;
13989 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
13990 } while (pEvtRec);
13991 }
13992}
13993
13994
13995/**
13996 * Allocate an event record.
13997 * @returns Pointer to a record.
13998 */
13999IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
14000{
14001 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14002 return NULL;
14003
14004 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
14005 if (pEvtRec)
14006 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
14007 else
14008 {
14009 if (!pVCpu->iem.s.ppIemEvtRecNext)
14010 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
14011
14012 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
14013 if (!pEvtRec)
14014 return NULL;
14015 }
14016 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
14017 pEvtRec->pNext = NULL;
14018 return pEvtRec;
14019}
14020
14021
14022/**
14023 * IOMMMIORead notification.
14024 */
14025VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
14026{
14027 PVMCPU pVCpu = VMMGetCpu(pVM);
14028 if (!pVCpu)
14029 return;
14030 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14031 if (!pEvtRec)
14032 return;
14033 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
14034 pEvtRec->u.RamRead.GCPhys = GCPhys;
14035 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
14036 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14037 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14038}
14039
14040
14041/**
14042 * IOMMMIOWrite notification.
14043 */
14044VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
14045{
14046 PVMCPU pVCpu = VMMGetCpu(pVM);
14047 if (!pVCpu)
14048 return;
14049 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14050 if (!pEvtRec)
14051 return;
14052 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
14053 pEvtRec->u.RamWrite.GCPhys = GCPhys;
14054 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
14055 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
14056 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
14057 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
14058 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
14059 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14060 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14061}
14062
14063
14064/**
14065 * IOMIOPortRead notification.
14066 */
14067VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
14068{
14069 PVMCPU pVCpu = VMMGetCpu(pVM);
14070 if (!pVCpu)
14071 return;
14072 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14073 if (!pEvtRec)
14074 return;
14075 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
14076 pEvtRec->u.IOPortRead.Port = Port;
14077 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
14078 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14079 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14080}
14081
14082/**
14083 * IOMIOPortWrite notification.
14084 */
14085VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14086{
14087 PVMCPU pVCpu = VMMGetCpu(pVM);
14088 if (!pVCpu)
14089 return;
14090 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14091 if (!pEvtRec)
14092 return;
14093 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
14094 pEvtRec->u.IOPortWrite.Port = Port;
14095 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
14096 pEvtRec->u.IOPortWrite.u32Value = u32Value;
14097 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14098 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14099}
14100
14101
14102VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
14103{
14104 PVMCPU pVCpu = VMMGetCpu(pVM);
14105 if (!pVCpu)
14106 return;
14107 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14108 if (!pEvtRec)
14109 return;
14110 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
14111 pEvtRec->u.IOPortStrRead.Port = Port;
14112 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
14113 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
14114 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14115 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14116}
14117
14118
14119VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
14120{
14121 PVMCPU pVCpu = VMMGetCpu(pVM);
14122 if (!pVCpu)
14123 return;
14124 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14125 if (!pEvtRec)
14126 return;
14127 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
14128 pEvtRec->u.IOPortStrWrite.Port = Port;
14129 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
14130 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
14131 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14132 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14133}
14134
14135
14136/**
14137 * Fakes and records an I/O port read.
14138 *
14139 * @returns VINF_SUCCESS.
14140 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14141 * @param Port The I/O port.
14142 * @param pu32Value Where to store the fake value.
14143 * @param cbValue The size of the access.
14144 */
14145IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14146{
14147 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14148 if (pEvtRec)
14149 {
14150 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
14151 pEvtRec->u.IOPortRead.Port = Port;
14152 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
14153 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
14154 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
14155 }
14156 pVCpu->iem.s.cIOReads++;
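 /* The 0xcc fill pattern also lets iemVerifyWriteRecord recognize and ignore
 memory stores of faked INS data (the "fend off ins" check). */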
14157 *pu32Value = 0xcccccccc;
14158 return VINF_SUCCESS;
14159}
14160
14161
14162/**
14163 * Fakes and records an I/O port write.
14164 *
14165 * @returns VINF_SUCCESS.
14166 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14167 * @param Port The I/O port.
14168 * @param u32Value The value being written.
14169 * @param cbValue The size of the access.
14170 */
14171IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14172{
14173 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14174 if (pEvtRec)
14175 {
14176 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
14177 pEvtRec->u.IOPortWrite.Port = Port;
14178 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
14179 pEvtRec->u.IOPortWrite.u32Value = u32Value;
14180 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
14181 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
14182 }
14183 pVCpu->iem.s.cIOWrites++;
14184 return VINF_SUCCESS;
14185}
14186
14187
14188/**
14189 * Used to add extra details about a stub case.
14190 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14191 */
14192IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
14193{
14194 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14195 PVM pVM = pVCpu->CTX_SUFF(pVM);
14197 char szRegs[4096];
14198 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
14199 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
14200 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
14201 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
14202 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
14203 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
14204 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
14205 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
14206 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
14207 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
14208 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
14209 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
14210 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
14211 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
14212 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
14213 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
14214 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
14215 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
14216 " efer=%016VR{efer}\n"
14217 " pat=%016VR{pat}\n"
14218 " sf_mask=%016VR{sf_mask}\n"
14219 "krnl_gs_base=%016VR{krnl_gs_base}\n"
14220 " lstar=%016VR{lstar}\n"
14221 " star=%016VR{star} cstar=%016VR{cstar}\n"
14222 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
14223 );
14224
14225 char szInstr1[256];
14226 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
14227 DBGF_DISAS_FLAGS_DEFAULT_MODE,
14228 szInstr1, sizeof(szInstr1), NULL);
14229 char szInstr2[256];
14230 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
14231 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14232 szInstr2, sizeof(szInstr2), NULL);
14233
14234 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
14235}
14236
14237
14238/**
14239 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
14240 * dump to the assertion info.
14241 *
14242 * @param pEvtRec The record to dump.
14243 */
14244IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
14245{
14246 switch (pEvtRec->enmEvent)
14247 {
14248 case IEMVERIFYEVENT_IOPORT_READ:
14249 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
14250 pEvtRec->u.IOPortRead.Port,
14251 pEvtRec->u.IOPortRead.cbValue);
14252 break;
14253 case IEMVERIFYEVENT_IOPORT_WRITE:
14254 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
14255 pEvtRec->u.IOPortWrite.Port,
14256 pEvtRec->u.IOPortWrite.cbValue,
14257 pEvtRec->u.IOPortWrite.u32Value);
14258 break;
14259 case IEMVERIFYEVENT_IOPORT_STR_READ:
14260 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
14261 pEvtRec->u.IOPortStrRead.Port,
14262 pEvtRec->u.IOPortStrRead.cbValue,
14263 pEvtRec->u.IOPortStrRead.cTransfers);
14264 break;
14265 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14266 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
14267 pEvtRec->u.IOPortStrWrite.Port,
14268 pEvtRec->u.IOPortStrWrite.cbValue,
14269 pEvtRec->u.IOPortStrWrite.cTransfers);
14270 break;
14271 case IEMVERIFYEVENT_RAM_READ:
14272 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
14273 pEvtRec->u.RamRead.GCPhys,
14274 pEvtRec->u.RamRead.cb);
14275 break;
14276 case IEMVERIFYEVENT_RAM_WRITE:
14277 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
14278 pEvtRec->u.RamWrite.GCPhys,
14279 pEvtRec->u.RamWrite.cb,
14280 (int)pEvtRec->u.RamWrite.cb,
14281 pEvtRec->u.RamWrite.ab);
14282 break;
14283 default:
14284 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
14285 break;
14286 }
14287}
14288
14289
14290/**
14291 * Raises an assertion on the two specified records, showing the given message
14292 * with dumps of both records attached.
14293 *
14294 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14295 * @param pEvtRec1 The first record.
14296 * @param pEvtRec2 The second record.
14297 * @param pszMsg The message explaining why we're asserting.
14298 */
14299IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
14300{
14301 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14302 iemVerifyAssertAddRecordDump(pEvtRec1);
14303 iemVerifyAssertAddRecordDump(pEvtRec2);
14304 iemVerifyAssertMsg2(pVCpu);
14305 RTAssertPanic();
14306}
14307
14308
14309/**
14310 * Raises an assertion on the specified record, showing the given message with
14311 * a record dump attached.
14312 *
14313 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14314 * @param pEvtRec The record.
14315 * @param pszMsg The message explaining why we're asserting.
14316 */
14317IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
14318{
14319 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14320 iemVerifyAssertAddRecordDump(pEvtRec);
14321 iemVerifyAssertMsg2(pVCpu);
14322 RTAssertPanic();
14323}
14324
14325
14326/**
14327 * Verifies a write record.
14328 *
14329 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14330 * @param pEvtRec The write record.
14331 * @param fRem Set if REM was doing the other execution. If clear,
14332 * it was HM.
14333 */
14334IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
14335{
14336 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
14337 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
14338 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
14339 if ( RT_FAILURE(rc)
14340 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
14341 {
14342 /* fend off ins */
14343 if ( !pVCpu->iem.s.cIOReads
14344 || pEvtRec->u.RamWrite.ab[0] != 0xcc
14345 || ( pEvtRec->u.RamWrite.cb != 1
14346 && pEvtRec->u.RamWrite.cb != 2
14347 && pEvtRec->u.RamWrite.cb != 4) )
14348 {
14349 /* fend off ROMs and MMIO */
14350 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
14351 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
14352 {
14353 /* fend off fxsave */
14354 if (pEvtRec->u.RamWrite.cb != 512)
14355 {
14356 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
14357 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14358 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
14359 RTAssertMsg2Add("%s: %.*Rhxs\n"
14360 "iem: %.*Rhxs\n",
14361 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
14362 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
14363 iemVerifyAssertAddRecordDump(pEvtRec);
14364 iemVerifyAssertMsg2(pVCpu);
14365 RTAssertPanic();
14366 }
14367 }
14368 }
14369 }
14370
14371}
14372
14373/**
14374 * Performs the post-execution verification checks.
14375 */
14376IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
14377{
14378 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14379 return rcStrictIem;
14380
14381 /*
14382 * Switch back the state.
14383 */
14384 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
14385 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
14386 Assert(pOrgCtx != pDebugCtx);
14387 IEM_GET_CTX(pVCpu) = pOrgCtx;
14388
14389 /*
14390 * Execute the instruction in REM.
14391 */
14392 bool fRem = false;
14393 PVM pVM = pVCpu->CTX_SUFF(pVM);
14395 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
14396#ifdef IEM_VERIFICATION_MODE_FULL_HM
14397 if ( HMIsEnabled(pVM)
14398 && pVCpu->iem.s.cIOReads == 0
14399 && pVCpu->iem.s.cIOWrites == 0
14400 && !pVCpu->iem.s.fProblematicMemory)
14401 {
14402 uint64_t uStartRip = pOrgCtx->rip;
14403 unsigned iLoops = 0;
14404 do
14405 {
14406 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
14407 iLoops++;
14408 } while ( rc == VINF_SUCCESS
14409 || ( rc == VINF_EM_DBG_STEPPED
14410 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14411 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
14412 || ( pOrgCtx->rip != pDebugCtx->rip
14413 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
14414 && iLoops < 8) );
14415 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
14416 rc = VINF_SUCCESS;
14417 }
14418#endif
14419 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
14420 || rc == VINF_IOM_R3_IOPORT_READ
14421 || rc == VINF_IOM_R3_IOPORT_WRITE
14422 || rc == VINF_IOM_R3_MMIO_READ
14423 || rc == VINF_IOM_R3_MMIO_READ_WRITE
14424 || rc == VINF_IOM_R3_MMIO_WRITE
14425 || rc == VINF_CPUM_R3_MSR_READ
14426 || rc == VINF_CPUM_R3_MSR_WRITE
14427 || rc == VINF_EM_RESCHEDULE
14428 )
14429 {
14430 EMRemLock(pVM);
14431 rc = REMR3EmulateInstruction(pVM, pVCpu);
14432 AssertRC(rc);
14433 EMRemUnlock(pVM);
14434 fRem = true;
14435 }
14436
14437# if 1 /* Skip unimplemented instructions for now. */
14438 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14439 {
14440 IEM_GET_CTX(pVCpu) = pOrgCtx;
14441 if (rc == VINF_EM_DBG_STEPPED)
14442 return VINF_SUCCESS;
14443 return rc;
14444 }
14445# endif
14446
14447 /*
14448 * Compare the register states.
14449 */
14450 unsigned cDiffs = 0;
14451 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
14452 {
14453 //Log(("REM and IEM ends up with different registers!\n"));
14454 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
14455
14456# define CHECK_FIELD(a_Field) \
14457 do \
14458 { \
14459 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14460 { \
14461 switch (sizeof(pOrgCtx->a_Field)) \
14462 { \
14463 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14464 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14465 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14466 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14467 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14468 } \
14469 cDiffs++; \
14470 } \
14471 } while (0)
14472# define CHECK_XSTATE_FIELD(a_Field) \
14473 do \
14474 { \
14475 if (pOrgXState->a_Field != pDebugXState->a_Field) \
14476 { \
14477 switch (sizeof(pOrgXState->a_Field)) \
14478 { \
14479 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14480 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14481 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14482 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14483 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14484 } \
14485 cDiffs++; \
14486 } \
14487 } while (0)
14488
14489# define CHECK_BIT_FIELD(a_Field) \
14490 do \
14491 { \
14492 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14493 { \
14494 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
14495 cDiffs++; \
14496 } \
14497 } while (0)
14498
14499# define CHECK_SEL(a_Sel) \
14500 do \
14501 { \
14502 CHECK_FIELD(a_Sel.Sel); \
14503 CHECK_FIELD(a_Sel.Attr.u); \
14504 CHECK_FIELD(a_Sel.u64Base); \
14505 CHECK_FIELD(a_Sel.u32Limit); \
14506 CHECK_FIELD(a_Sel.fFlags); \
14507 } while (0)
14508
14509 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
14510 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
14511
14512#if 1 /* The recompiler doesn't update these the intel way. */
14513 if (fRem)
14514 {
14515 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
14516 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
14517 pOrgXState->x87.CS = pDebugXState->x87.CS;
14518 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
14519 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
14520 pOrgXState->x87.DS = pDebugXState->x87.DS;
14521 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
14522 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
14523 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
14524 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
14525 }
14526#endif
14527 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
14528 {
14529 RTAssertMsg2Weak(" the FPU state differs\n");
14530 cDiffs++;
14531 CHECK_XSTATE_FIELD(x87.FCW);
14532 CHECK_XSTATE_FIELD(x87.FSW);
14533 CHECK_XSTATE_FIELD(x87.FTW);
14534 CHECK_XSTATE_FIELD(x87.FOP);
14535 CHECK_XSTATE_FIELD(x87.FPUIP);
14536 CHECK_XSTATE_FIELD(x87.CS);
14537 CHECK_XSTATE_FIELD(x87.Rsrvd1);
14538 CHECK_XSTATE_FIELD(x87.FPUDP);
14539 CHECK_XSTATE_FIELD(x87.DS);
14540 CHECK_XSTATE_FIELD(x87.Rsrvd2);
14541 CHECK_XSTATE_FIELD(x87.MXCSR);
14542 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
14543 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
14544 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
14545 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
14546 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
14547 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
14548 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
14549 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
14550 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
14551 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
14552 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
14553 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
14554 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
14555 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
14556 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
14557 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
14558 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
14559 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
14560 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
14561 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
14562 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
14563 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
14564 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
14565 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
14566 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
14567 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
14568 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
14569 }
14570 CHECK_FIELD(rip);
14571 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
14572 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
14573 {
14574 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
14575 CHECK_BIT_FIELD(rflags.Bits.u1CF);
14576 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
14577 CHECK_BIT_FIELD(rflags.Bits.u1PF);
14578 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
14579 CHECK_BIT_FIELD(rflags.Bits.u1AF);
14580 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
14581 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
14582 CHECK_BIT_FIELD(rflags.Bits.u1SF);
14583 CHECK_BIT_FIELD(rflags.Bits.u1TF);
14584 CHECK_BIT_FIELD(rflags.Bits.u1IF);
14585 CHECK_BIT_FIELD(rflags.Bits.u1DF);
14586 CHECK_BIT_FIELD(rflags.Bits.u1OF);
14587 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
14588 CHECK_BIT_FIELD(rflags.Bits.u1NT);
14589 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
14590 if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
14591 CHECK_BIT_FIELD(rflags.Bits.u1RF);
14592 CHECK_BIT_FIELD(rflags.Bits.u1VM);
14593 CHECK_BIT_FIELD(rflags.Bits.u1AC);
14594 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
14595 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
14596 CHECK_BIT_FIELD(rflags.Bits.u1ID);
14597 }
14598
14599 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
14600 CHECK_FIELD(rax);
14601 CHECK_FIELD(rcx);
14602 if (!pVCpu->iem.s.fIgnoreRaxRdx)
14603 CHECK_FIELD(rdx);
14604 CHECK_FIELD(rbx);
14605 CHECK_FIELD(rsp);
14606 CHECK_FIELD(rbp);
14607 CHECK_FIELD(rsi);
14608 CHECK_FIELD(rdi);
14609 CHECK_FIELD(r8);
14610 CHECK_FIELD(r9);
14611 CHECK_FIELD(r10);
14612 CHECK_FIELD(r11);
14613 CHECK_FIELD(r12);
14614 CHECK_FIELD(r13);
14615 CHECK_SEL(cs);
14616 CHECK_SEL(ss);
14617 CHECK_SEL(ds);
14618 CHECK_SEL(es);
14619 CHECK_SEL(fs);
14620 CHECK_SEL(gs);
14621 CHECK_FIELD(cr0);
14622
14623 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
14624 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
14625 /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access
14626 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
14627 if (pOrgCtx->cr2 != pDebugCtx->cr2)
14628 {
14629 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
14630 { /* ignore */ }
14631 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
14632 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
14633 && fRem)
14634 { /* ignore */ }
14635 else
14636 CHECK_FIELD(cr2);
14637 }
14638 CHECK_FIELD(cr3);
14639 CHECK_FIELD(cr4);
14640 CHECK_FIELD(dr[0]);
14641 CHECK_FIELD(dr[1]);
14642 CHECK_FIELD(dr[2]);
14643 CHECK_FIELD(dr[3]);
14644 CHECK_FIELD(dr[6]);
14645 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
14646 CHECK_FIELD(dr[7]);
14647 CHECK_FIELD(gdtr.cbGdt);
14648 CHECK_FIELD(gdtr.pGdt);
14649 CHECK_FIELD(idtr.cbIdt);
14650 CHECK_FIELD(idtr.pIdt);
14651 CHECK_SEL(ldtr);
14652 CHECK_SEL(tr);
14653 CHECK_FIELD(SysEnter.cs);
14654 CHECK_FIELD(SysEnter.eip);
14655 CHECK_FIELD(SysEnter.esp);
14656 CHECK_FIELD(msrEFER);
14657 CHECK_FIELD(msrSTAR);
14658 CHECK_FIELD(msrPAT);
14659 CHECK_FIELD(msrLSTAR);
14660 CHECK_FIELD(msrCSTAR);
14661 CHECK_FIELD(msrSFMASK);
14662 CHECK_FIELD(msrKERNELGSBASE);
14663
14664 if (cDiffs != 0)
14665 {
14666 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14667 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
14668 RTAssertPanic();
14669 static bool volatile s_fEnterDebugger = true;
14670 if (s_fEnterDebugger)
14671 DBGFSTOP(pVM);
14672
14673# if 1 /* Ignore unimplemented instructions for now. */
14674 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14675 rcStrictIem = VINF_SUCCESS;
14676# endif
14677 }
14678# undef CHECK_FIELD
14679# undef CHECK_BIT_FIELD
14680 }
14681
14682 /*
14683 * If the register state compared fine, check the verification event
14684 * records.
14685 */
14686 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
14687 {
14688 /*
14689 * Compare verification event records.
14690 * - I/O port accesses should be a 1:1 match.
14691 */
14692 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
14693 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
14694 while (pIemRec && pOtherRec)
14695 {
14696 /* Since the other engine may not record all RAM accesses, skip IEM RAM records
14697 it lacks, verifying any writes directly against guest memory instead. */
14698 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
14699 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
14700 && pIemRec->pNext)
14701 {
14702 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14703 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14704 pIemRec = pIemRec->pNext;
14705 }
14706
14707 /* Do the compare. */
14708 if (pIemRec->enmEvent != pOtherRec->enmEvent)
14709 {
14710 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
14711 break;
14712 }
14713 bool fEquals;
14714 switch (pIemRec->enmEvent)
14715 {
14716 case IEMVERIFYEVENT_IOPORT_READ:
14717 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
14718 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
14719 break;
14720 case IEMVERIFYEVENT_IOPORT_WRITE:
14721 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
14722 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
14723 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
14724 break;
14725 case IEMVERIFYEVENT_IOPORT_STR_READ:
14726 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
14727 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
14728 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
14729 break;
14730 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14731 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
14732 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
14733 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
14734 break;
14735 case IEMVERIFYEVENT_RAM_READ:
14736 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
14737 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
14738 break;
14739 case IEMVERIFYEVENT_RAM_WRITE:
14740 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
14741 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
14742 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
14743 break;
14744 default:
14745 fEquals = false;
14746 break;
14747 }
14748 if (!fEquals)
14749 {
14750 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
14751 break;
14752 }
14753
14754 /* advance */
14755 pIemRec = pIemRec->pNext;
14756 pOtherRec = pOtherRec->pNext;
14757 }
14758
14759 /* Ignore extra writes and reads. */
14760 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
14761 {
14762 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14763 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14764 pIemRec = pIemRec->pNext;
14765 }
14766 if (pIemRec != NULL)
14767 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
14768 else if (pOtherRec != NULL)
14769 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
14770 }
14771 IEM_GET_CTX(pVCpu) = pOrgCtx;
14772
14773 return rcStrictIem;
14774}
14775
14776#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14777
14778/* stubs */
14779IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14780{
14781 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
14782 return VERR_INTERNAL_ERROR;
14783}
14784
14785IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14786{
14787 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
14788 return VERR_INTERNAL_ERROR;
14789}
14790
14791#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14792
14793
14794#ifdef LOG_ENABLED
14795/**
14796 * Logs the current instruction.
14797 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14798 * @param pCtx The current CPU context.
14799 * @param fSameCtx Set if we have the same context information as the VMM,
14800 * clear if we may have already executed an instruction in
14801 * our debug context. When clear, we assume IEMCPU holds
14802 * valid CPU mode info.
14803 */
14804IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
14805{
14806# ifdef IN_RING3
14807 if (LogIs2Enabled())
14808 {
14809 char szInstr[256];
14810 uint32_t cbInstr = 0;
14811 if (fSameCtx)
14812 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
14813 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14814 szInstr, sizeof(szInstr), &cbInstr);
14815 else
14816 {
14817 uint32_t fFlags = 0;
14818 switch (pVCpu->iem.s.enmCpuMode)
14819 {
14820 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
14821 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
14822 case IEMMODE_16BIT:
14823 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
14824 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
14825 else
14826 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
14827 break;
14828 }
14829 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
14830 szInstr, sizeof(szInstr), &cbInstr);
14831 }
14832
14833 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
14834 Log2(("****\n"
14835 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
14836 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
14837 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
14838 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
14839 " %s\n"
14840 ,
14841 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
14842 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
14843 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
14844 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
14845 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
14846 szInstr));
14847
14848 if (LogIs3Enabled())
14849 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14850 }
14851 else
14852# endif
14853 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
14854 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
14855 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
14856}
14857#endif
14858
14859
14860/**
14861 * Makes status code adjustments (pass-up from I/O and access handlers)
14862 * as well as maintaining statistics.
14863 *
14864 * @returns Strict VBox status code to pass up.
14865 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14866 * @param rcStrict The status from executing an instruction.
14867 */
14868DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14869{
14870 if (rcStrict != VINF_SUCCESS)
14871 {
14872 if (RT_SUCCESS(rcStrict))
14873 {
14874 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
14875 || rcStrict == VINF_IOM_R3_IOPORT_READ
14876 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
14877 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
14878 || rcStrict == VINF_IOM_R3_MMIO_READ
14879 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
14880 || rcStrict == VINF_IOM_R3_MMIO_WRITE
14881 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
14882 || rcStrict == VINF_CPUM_R3_MSR_READ
14883 || rcStrict == VINF_CPUM_R3_MSR_WRITE
14884 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
14885 || rcStrict == VINF_EM_RAW_TO_R3
14886 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
14887 || rcStrict == VINF_EM_TRIPLE_FAULT
14888 /* raw-mode / virt handlers only: */
14889 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
14890 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
14891 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
14892 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
14893 || rcStrict == VINF_SELM_SYNC_GDT
14894 || rcStrict == VINF_CSAM_PENDING_ACTION
14895 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
14896 /* nested hw.virt codes: */
14897 || rcStrict == VINF_SVM_VMEXIT
14898 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
14899/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
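 /* A pending pass-up status takes over when it is not a VINF_EM_* scheduling code,
 or when it ranks higher (lower EM statuses are considered more important) than
 the status the instruction returned; otherwise the instruction status stands. */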
14900 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
14901#ifdef VBOX_WITH_NESTED_HWVIRT
14902 if ( rcStrict == VINF_SVM_VMEXIT
14903 && rcPassUp == VINF_SUCCESS)
14904 rcStrict = VINF_SUCCESS;
14905 else
14906#endif
14907 if (rcPassUp == VINF_SUCCESS)
14908 pVCpu->iem.s.cRetInfStatuses++;
14909 else if ( rcPassUp < VINF_EM_FIRST
14910 || rcPassUp > VINF_EM_LAST
14911 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
14912 {
14913 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14914 pVCpu->iem.s.cRetPassUpStatus++;
14915 rcStrict = rcPassUp;
14916 }
14917 else
14918 {
14919 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14920 pVCpu->iem.s.cRetInfStatuses++;
14921 }
14922 }
14923 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
14924 pVCpu->iem.s.cRetAspectNotImplemented++;
14925 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14926 pVCpu->iem.s.cRetInstrNotImplemented++;
14927#ifdef IEM_VERIFICATION_MODE_FULL
14928 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
14929 rcStrict = VINF_SUCCESS;
14930#endif
14931 else
14932 pVCpu->iem.s.cRetErrStatuses++;
14933 }
14934 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
14935 {
14936 pVCpu->iem.s.cRetPassUpStatus++;
14937 rcStrict = pVCpu->iem.s.rcPassUp;
14938 }
14939
14940 return rcStrict;
14941}
14942
14943
14944/**
14945 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
14946 * IEMExecOneWithPrefetchedByPC.
14947 *
14948 * Similar code is found in IEMExecLots.
14949 *
14950 * @returns Strict VBox status code.
14951 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14953 * @param fExecuteInhibit If set, execute the instruction following CLI,
14954 * POP SS and MOV SS,GR.
14955 */
14956DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
14957{
14958#ifdef IEM_WITH_SETJMP
14959 VBOXSTRICTRC rcStrict;
14960 jmp_buf JmpBuf;
14961 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14962 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14963 if ((rcStrict = setjmp(JmpBuf)) == 0)
14964 {
14965 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14966 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14967 }
14968 else
14969 pVCpu->iem.s.cLongJumps++;
14970 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14971#else
14972 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14973 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14974#endif
14975 if (rcStrict == VINF_SUCCESS)
14976 pVCpu->iem.s.cInstructions++;
14977 if (pVCpu->iem.s.cActiveMappings > 0)
14978 {
14979 Assert(rcStrict != VINF_SUCCESS);
14980 iemMemRollback(pVCpu);
14981 }
14982//#ifdef DEBUG
14983// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14984//#endif
14985
14986 /* Execute the next instruction as well if a cli, pop ss or
14987 mov ss, Gr has just completed successfully. */
14988 if ( fExecuteInhibit
14989 && rcStrict == VINF_SUCCESS
14990 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14991 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
14992 {
14993 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14994 if (rcStrict == VINF_SUCCESS)
14995 {
14996#ifdef LOG_ENABLED
14997 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
14998#endif
14999#ifdef IEM_WITH_SETJMP
15000 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
15001 if ((rcStrict = setjmp(JmpBuf)) == 0)
15002 {
15003 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
15004 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
15005 }
15006 else
15007 pVCpu->iem.s.cLongJumps++;
15008 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
15009#else
15010 IEM_OPCODE_GET_NEXT_U8(&b);
15011 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
15012#endif
15013 if (rcStrict == VINF_SUCCESS)
15014 pVCpu->iem.s.cInstructions++;
15015 if (pVCpu->iem.s.cActiveMappings > 0)
15016 {
15017 Assert(rcStrict != VINF_SUCCESS);
15018 iemMemRollback(pVCpu);
15019 }
15020 }
15021 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111)); /* bogus PC that cannot match the next RIP, so the inhibit lapses */
15022 }
15023
15024 /*
15025 * Return value fiddling, statistics and sanity assertions.
15026 */
15027 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15028
15029 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
15030 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
15031#if defined(IEM_VERIFICATION_MODE_FULL)
15032 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
15033 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
15034 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
15035 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
15036#endif
15037 return rcStrict;
15038}
15039
15040
15041#ifdef IN_RC
15042/**
15043 * Re-enters raw-mode or ensures we return to ring-3.
15044 *
15045 * @returns rcStrict, maybe modified.
15046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
15047 * @param pCtx The current CPU context.
15048 * @param rcStrict The status code returned by the interpreter.
15049 */
15050DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
15051{
15052 if ( !pVCpu->iem.s.fInPatchCode
15053 && ( rcStrict == VINF_SUCCESS
15054 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
15055 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
15056 {
15057 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
15058 CPUMRawEnter(pVCpu);
15059 else
15060 {
15061 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
15062 rcStrict = VINF_EM_RESCHEDULE;
15063 }
15064 }
15065 return rcStrict;
15066}
15067#endif
15068
15069
15070/**
15071 * Execute one instruction.
15072 *
15073 * @returns Strict VBox status code.
15074 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15075 */
15076VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
15077{
15078#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15079 if (++pVCpu->iem.s.cVerifyDepth == 1)
15080 iemExecVerificationModeSetup(pVCpu);
15081#endif
15082#ifdef LOG_ENABLED
15083 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15084 iemLogCurInstr(pVCpu, pCtx, true);
15085#endif
15086
15087 /*
15088 * Do the decoding and emulation.
15089 */
15090 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15091 if (rcStrict == VINF_SUCCESS)
15092 rcStrict = iemExecOneInner(pVCpu, true);
15093
15094#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15095 /*
15096 * Assert some sanity.
15097 */
15098 if (pVCpu->iem.s.cVerifyDepth == 1)
15099 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
15100 pVCpu->iem.s.cVerifyDepth--;
15101#endif
15102#ifdef IN_RC
15103 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15104#endif
15105 if (rcStrict != VINF_SUCCESS)
15106 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15107 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15108 return rcStrict;
15109}
15110
15111
15112VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
15113{
15114 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15115 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15116
15117 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15118 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15119 if (rcStrict == VINF_SUCCESS)
15120 {
15121 rcStrict = iemExecOneInner(pVCpu, true);
15122 if (pcbWritten)
15123 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15124 }
15125
15126#ifdef IN_RC
15127 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15128#endif
15129 return rcStrict;
15130}
15131
15132
15133VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15134 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
15135{
15136 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15137 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15138
15139 VBOXSTRICTRC rcStrict;
15140 if ( cbOpcodeBytes
15141 && pCtx->rip == OpcodeBytesPC)
15142 {
15143 iemInitDecoder(pVCpu, false);
15144#ifdef IEM_WITH_CODE_TLB
15145 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15146 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15147 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15148 pVCpu->iem.s.offCurInstrStart = 0;
15149 pVCpu->iem.s.offInstrNextByte = 0;
15150#else
15151 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15152 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15153#endif
15154 rcStrict = VINF_SUCCESS;
15155 }
15156 else
15157 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15158 if (rcStrict == VINF_SUCCESS)
15159 {
15160 rcStrict = iemExecOneInner(pVCpu, true);
15161 }
15162
15163#ifdef IN_RC
15164 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15165#endif
15166 return rcStrict;
15167}
15168
15169
15170VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
15171{
15172 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15173 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15174
15175 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15176 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15177 if (rcStrict == VINF_SUCCESS)
15178 {
15179 rcStrict = iemExecOneInner(pVCpu, false);
15180 if (pcbWritten)
15181 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15182 }
15183
15184#ifdef IN_RC
15185 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15186#endif
15187 return rcStrict;
15188}
15189
15190
15191VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15192 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
15193{
15194 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15195 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15196
15197 VBOXSTRICTRC rcStrict;
15198 if ( cbOpcodeBytes
15199 && pCtx->rip == OpcodeBytesPC)
15200 {
15201 iemInitDecoder(pVCpu, true);
15202#ifdef IEM_WITH_CODE_TLB
15203 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15204 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15205 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15206 pVCpu->iem.s.offCurInstrStart = 0;
15207 pVCpu->iem.s.offInstrNextByte = 0;
15208#else
15209 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15210 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15211#endif
15212 rcStrict = VINF_SUCCESS;
15213 }
15214 else
15215 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15216 if (rcStrict == VINF_SUCCESS)
15217 rcStrict = iemExecOneInner(pVCpu, false);
15218
15219#ifdef IN_RC
15220 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15221#endif
15222 return rcStrict;
15223}
15224
15225
15226/**
15227 * For debugging DISGetParamSize; may come in handy.
15228 *
15229 * @returns Strict VBox status code.
15230 * @param pVCpu The cross context virtual CPU structure of the
15231 * calling EMT.
15232 * @param pCtxCore The context core structure.
15233 * @param OpcodeBytesPC The PC of the opcode bytes.
15234 * @param pvOpcodeBytes Prefetched opcode bytes.
15235 * @param cbOpcodeBytes Number of prefetched bytes.
15236 * @param pcbWritten Where to return the number of bytes written.
15237 * Optional.
15238 */
15239VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15240 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
15241 uint32_t *pcbWritten)
15242{
15243 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15244 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15245
15246 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15247 VBOXSTRICTRC rcStrict;
15248 if ( cbOpcodeBytes
15249 && pCtx->rip == OpcodeBytesPC)
15250 {
15251 iemInitDecoder(pVCpu, true);
15252#ifdef IEM_WITH_CODE_TLB
15253 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15254 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15255 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15256 pVCpu->iem.s.offCurInstrStart = 0;
15257 pVCpu->iem.s.offInstrNextByte = 0;
15258#else
15259 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15260 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15261#endif
15262 rcStrict = VINF_SUCCESS;
15263 }
15264 else
15265 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15266 if (rcStrict == VINF_SUCCESS)
15267 {
15268 rcStrict = iemExecOneInner(pVCpu, false);
15269 if (pcbWritten)
15270 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15271 }
15272
15273#ifdef IN_RC
15274 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15275#endif
15276 return rcStrict;
15277}
15278
15279
15280VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
15281{
15282 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
15283
15284#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15285 /*
15286 * See if there is an interrupt pending in TRPM, inject it if we can.
15287 */
15288 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15289# ifdef IEM_VERIFICATION_MODE_FULL
15290 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15291# endif
15292
15293 /** @todo Maybe someday we can centralize this under CPUMCanInjectInterrupt()? */
15294# if defined(VBOX_WITH_NESTED_HWVIRT)
15295 bool fIntrEnabled = pCtx->hwvirt.fGif;
15296 if (fIntrEnabled)
15297 {
15298 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
15299 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);
15300 else
15301 fIntrEnabled = pCtx->eflags.Bits.u1IF;
15302 }
15303# else
15304 bool fIntrEnabled = pCtx->eflags.Bits.u1IF;
15305# endif
15306 if ( fIntrEnabled
15307 && TRPMHasTrap(pVCpu)
15308 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15309 {
15310 uint8_t u8TrapNo;
15311 TRPMEVENT enmType;
15312 RTGCUINT uErrCode;
15313 RTGCPTR uCr2;
15314 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15315 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15316 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15317 TRPMResetTrap(pVCpu);
15318 }
15319
15320 /*
15321 * Log the state.
15322 */
15323# ifdef LOG_ENABLED
15324 iemLogCurInstr(pVCpu, pCtx, true);
15325# endif
15326
15327 /*
15328 * Do the decoding and emulation.
15329 */
15330 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15331 if (rcStrict == VINF_SUCCESS)
15332 rcStrict = iemExecOneInner(pVCpu, true);
15333
15334 /*
15335 * Assert some sanity.
15336 */
15337 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
15338
15339 /*
15340 * Log and return.
15341 */
15342 if (rcStrict != VINF_SUCCESS)
15343 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15344 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15345 if (pcInstructions)
15346 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15347 return rcStrict;
15348
15349#else /* Not verification mode */
15350
15351 /*
15352 * See if there is an interrupt pending in TRPM, inject it if we can.
15353 */
15354 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15355# ifdef IEM_VERIFICATION_MODE_FULL
15356 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15357# endif
15358
15359 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
15360# if defined(VBOX_WITH_NESTED_HWVIRT)
15361 bool fIntrEnabled = pCtx->hwvirt.fGif;
15362 if (fIntrEnabled)
15363 {
15364 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
15365 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);
15366 else
15367 fIntrEnabled = pCtx->eflags.Bits.u1IF;
15368 }
15369# else
15370 bool fIntrEnabled = pCtx->eflags.Bits.u1IF;
15371# endif
15372 if ( fIntrEnabled
15373 && TRPMHasTrap(pVCpu)
15374 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15375 {
15376 uint8_t u8TrapNo;
15377 TRPMEVENT enmType;
15378 RTGCUINT uErrCode;
15379 RTGCPTR uCr2;
15380 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15381 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15382 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15383 TRPMResetTrap(pVCpu);
15384 }
15385
15386 /*
15387 * Initial decoder init w/ prefetch, then setup setjmp.
15388 */
15389 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15390 if (rcStrict == VINF_SUCCESS)
15391 {
15392# ifdef IEM_WITH_SETJMP
15393 jmp_buf JmpBuf;
15394 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
15395 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
15396 pVCpu->iem.s.cActiveMappings = 0;
15397 if ((rcStrict = setjmp(JmpBuf)) == 0)
15398# endif
15399 {
15400 /*
15401 * The run loop. We limit ourselves to 4096 instructions right now.
15402 */
15403 PVM pVM = pVCpu->CTX_SUFF(pVM);
15404 uint32_t cInstr = 4096;
15405 for (;;)
15406 {
15407 /*
15408 * Log the state.
15409 */
15410# ifdef LOG_ENABLED
15411 iemLogCurInstr(pVCpu, pCtx, true);
15412# endif
15413
15414 /*
15415 * Do the decoding and emulation.
15416 */
15417 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
15418 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
15419 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15420 {
15421 Assert(pVCpu->iem.s.cActiveMappings == 0);
15422 pVCpu->iem.s.cInstructions++;
15423 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
15424 {
15425 uint32_t fCpu = pVCpu->fLocalForcedActions
15426 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
15427 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
15428 | VMCPU_FF_TLB_FLUSH
15429# ifdef VBOX_WITH_RAW_MODE
15430 | VMCPU_FF_TRPM_SYNC_IDT
15431 | VMCPU_FF_SELM_SYNC_TSS
15432 | VMCPU_FF_SELM_SYNC_GDT
15433 | VMCPU_FF_SELM_SYNC_LDT
15434# endif
15435 | VMCPU_FF_INHIBIT_INTERRUPTS
15436 | VMCPU_FF_BLOCK_NMIS
15437 | VMCPU_FF_UNHALT ));
15438
15439 if (RT_LIKELY( ( !fCpu
15440 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
15441 && !pCtx->rflags.Bits.u1IF) )
15442 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
15443 {
15444 if (cInstr-- > 0)
15445 {
15446 Assert(pVCpu->iem.s.cActiveMappings == 0);
15447 iemReInitDecoder(pVCpu);
15448 continue;
15449 }
15450 }
15451 }
15452 Assert(pVCpu->iem.s.cActiveMappings == 0);
15453 }
15454 else if (pVCpu->iem.s.cActiveMappings > 0)
15455 iemMemRollback(pVCpu);
15456 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15457 break;
15458 }
15459 }
15460# ifdef IEM_WITH_SETJMP
15461 else
15462 {
15463 if (pVCpu->iem.s.cActiveMappings > 0)
15464 iemMemRollback(pVCpu);
15465 pVCpu->iem.s.cLongJumps++;
15466 }
15467 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
15468# endif
15469
15470 /*
15471 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
15472 */
15473 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
15474 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
15475# if defined(IEM_VERIFICATION_MODE_FULL)
15476 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
15477 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
15478 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
15479 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
15480# endif
15481 }
15482# ifdef VBOX_WITH_NESTED_HWVIRT
15483 else
15484 {
15485 /*
15486 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
15487 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
15488 */
15489 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15490 }
15491# endif
15492
15493 /*
15494 * Maybe re-enter raw-mode and log.
15495 */
15496# ifdef IN_RC
15497 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15498# endif
15499 if (rcStrict != VINF_SUCCESS)
15500 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15501 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15502 if (pcInstructions)
15503 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15504 return rcStrict;
15505#endif /* Not verification mode */
15506}
15507
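/*
 * Minimal caller-side sketch of driving the interpreter with IEMExecLots
 * (illustrative only; assumes an initialized pVCpu on the EMT and the usual
 * VMM headers -- the real EM loop does considerably more between calls).
 */
#if 0
static VBOXSTRICTRC ExampleRunGuestInIem(PVMCPU pVCpu)
{
    VBOXSTRICTRC rcStrict;
    do
    {
        uint32_t cInstructions = 0;
        rcStrict = IEMExecLots(pVCpu, &cInstructions);
        /* cInstructions now holds the number of instructions retired by this call. */
    } while (rcStrict == VINF_SUCCESS);
    return rcStrict; /* Scheduling/informational statuses are handled by the caller. */
}
#endif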
15508
15509
15510/**
15511 * Injects a trap, fault, abort, software interrupt or external interrupt.
15512 *
15513 * The parameter list matches TRPMQueryTrapAll pretty closely.
15514 *
15515 * @returns Strict VBox status code.
15516 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15517 * @param u8TrapNo The trap number.
15518 * @param enmType What type is it (trap/fault/abort), software
15519 * interrupt or hardware interrupt.
15520 * @param uErrCode The error code if applicable.
15521 * @param uCr2 The CR2 value if applicable.
15522 * @param cbInstr The instruction length (only relevant for
15523 * software interrupts).
15524 */
15525VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
15526 uint8_t cbInstr)
15527{
15528 iemInitDecoder(pVCpu, false);
15529#ifdef DBGFTRACE_ENABLED
15530 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
15531 u8TrapNo, enmType, uErrCode, uCr2);
15532#endif
15533
15534 uint32_t fFlags;
15535 switch (enmType)
15536 {
15537 case TRPM_HARDWARE_INT:
15538 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
15539 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
15540 uErrCode = uCr2 = 0;
15541 break;
15542
15543 case TRPM_SOFTWARE_INT:
15544 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
15545 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
15546 uErrCode = uCr2 = 0;
15547 break;
15548
15549 case TRPM_TRAP:
15550 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
15551 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
15552 if (u8TrapNo == X86_XCPT_PF)
15553 fFlags |= IEM_XCPT_FLAGS_CR2;
15554 switch (u8TrapNo)
15555 {
15556 case X86_XCPT_DF:
15557 case X86_XCPT_TS:
15558 case X86_XCPT_NP:
15559 case X86_XCPT_SS:
15560 case X86_XCPT_PF:
15561 case X86_XCPT_AC:
15562 fFlags |= IEM_XCPT_FLAGS_ERR;
15563 break;
15564
15565 case X86_XCPT_NMI:
15566 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
15567 break;
15568 }
15569 break;
15570
15571 IEM_NOT_REACHED_DEFAULT_CASE_RET();
15572 }
15573
15574 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
15575}
15576
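/*
 * Illustrative sketch: injecting a guest \#PF the same way IEMExecLots forwards
 * pending TRPM events.  The Example* name is hypothetical and uErr/uCr2 are
 * assumed to come from the caller.
 */
#if 0
static VBOXSTRICTRC ExampleInjectPageFault(PVMCPU pVCpu, uint16_t uErr, RTGCPTR uCr2)
{
    /* Per the switch above this raises the fault with IEM_XCPT_FLAGS_T_CPU_XCPT,
       IEM_XCPT_FLAGS_ERR and IEM_XCPT_FLAGS_CR2 set. */
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErr, uCr2, 0 /* cbInstr */);
}
#endif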
15577
15578/**
15579 * Injects the active TRPM event.
15580 *
15581 * @returns Strict VBox status code.
15582 * @param pVCpu The cross context virtual CPU structure.
15583 */
15584VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
15585{
15586#ifndef IEM_IMPLEMENTS_TASKSWITCH
15587 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
15588#else
15589 uint8_t u8TrapNo;
15590 TRPMEVENT enmType;
15591 RTGCUINT uErrCode;
15592 RTGCUINTPTR uCr2;
15593 uint8_t cbInstr;
15594 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
15595 if (RT_FAILURE(rc))
15596 return rc;
15597
15598 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
15599
15600 /** @todo Are there any other codes that imply the event was successfully
15601 * delivered to the guest? See @bugref{6607}. */
15602 if ( rcStrict == VINF_SUCCESS
15603 || rcStrict == VINF_IEM_RAISED_XCPT)
15604 {
15605 TRPMResetTrap(pVCpu);
15606 }
15607 return rcStrict;
15608#endif
15609}
15610
15611
15612VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
15613{
15614 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15615 return VERR_NOT_IMPLEMENTED;
15616}
15617
15618
15619VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
15620{
15621 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15622 return VERR_NOT_IMPLEMENTED;
15623}
15624
15625
15626#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
15627/**
15628 * Executes a IRET instruction with default operand size.
15629 *
15630 * This is for PATM.
15631 *
15632 * @returns VBox status code.
15633 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15634 * @param pCtxCore The register frame.
15635 */
15636VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
15637{
15638 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15639
15640 iemCtxCoreToCtx(pCtx, pCtxCore);
15641 iemInitDecoder(pVCpu);
15642 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
15643 if (rcStrict == VINF_SUCCESS)
15644 iemCtxToCtxCore(pCtxCore, pCtx);
15645 else
15646 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15647 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15648 return rcStrict;
15649}
15650#endif
15651
15652
15653/**
15654 * Macro used by the IEMExec* methods to check the given instruction length.
15655 *
15656 * Will return on failure!
15657 *
15658 * @param a_cbInstr The given instruction length.
15659 * @param a_cbMin The minimum length.
15660 */
15661#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
15662 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
15663 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
15664
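/*
 * The single comparison in IEMEXEC_ASSERT_INSTR_LEN_RETURN is the usual
 * unsigned wrap-around range check: it accepts exactly the lengths
 * a_cbMin <= a_cbInstr <= 15, because any value below a_cbMin underflows to a
 * huge unsigned number.  Stand-alone sketch of the same idea (illustrative
 * only; assumes cbMin <= 15, which holds for all users of the macro here).
 */
#if 0
static bool ExampleIsValidInstrLen(unsigned cbInstr, unsigned cbMin)
{
    /* Equivalent to: cbInstr >= cbMin && cbInstr <= 15, but with one branch. */
    return cbInstr - cbMin <= 15u - cbMin;
}
#endif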
15665
15666/**
15667 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
15668 *
15669 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
15670 *
15671 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
15672 * @param pVCpu The cross context virtual CPU structure of the calling thread.
15673 * @param rcStrict The status code to fiddle.
15674 */
15675DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15676{
15677 iemUninitExec(pVCpu);
15678#ifdef IN_RC
15679 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
15680 iemExecStatusCodeFiddling(pVCpu, rcStrict));
15681#else
15682 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15683#endif
15684}
15685
15686
15687/**
15688 * Interface for HM and EM for executing string I/O OUT (write) instructions.
15689 *
15690 * This API ASSUMES that the caller has already verified that the guest code is
15691 * allowed to access the I/O port. (The I/O port is in the DX register in the
15692 * guest state.)
15693 *
15694 * @returns Strict VBox status code.
15695 * @param pVCpu The cross context virtual CPU structure.
15696 * @param cbValue The size of the I/O port access (1, 2, or 4).
15697 * @param enmAddrMode The addressing mode.
15698 * @param fRepPrefix Indicates whether a repeat prefix is used
15699 * (doesn't matter which for this instruction).
15700 * @param cbInstr The instruction length in bytes.
15701 * @param iEffSeg The effective segment register number.
15702 * @param fIoChecked Whether the access to the I/O port has been
15703 * checked or not. It's typically checked in the
15704 * HM scenario.
15705 */
15706VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15707 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
15708{
15709 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
15710 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15711
15712 /*
15713 * State init.
15714 */
15715 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15716
15717 /*
15718 * Switch orgy for getting to the right handler.
15719 */
15720 VBOXSTRICTRC rcStrict;
15721 if (fRepPrefix)
15722 {
15723 switch (enmAddrMode)
15724 {
15725 case IEMMODE_16BIT:
15726 switch (cbValue)
15727 {
15728 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15729 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15730 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15731 default:
15732 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15733 }
15734 break;
15735
15736 case IEMMODE_32BIT:
15737 switch (cbValue)
15738 {
15739 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15740 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15741 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15742 default:
15743 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15744 }
15745 break;
15746
15747 case IEMMODE_64BIT:
15748 switch (cbValue)
15749 {
15750 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15751 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15752 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15753 default:
15754 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15755 }
15756 break;
15757
15758 default:
15759 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15760 }
15761 }
15762 else
15763 {
15764 switch (enmAddrMode)
15765 {
15766 case IEMMODE_16BIT:
15767 switch (cbValue)
15768 {
15769 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15770 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15771 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15772 default:
15773 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15774 }
15775 break;
15776
15777 case IEMMODE_32BIT:
15778 switch (cbValue)
15779 {
15780 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15781 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15782 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15783 default:
15784 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15785 }
15786 break;
15787
15788 case IEMMODE_64BIT:
15789 switch (cbValue)
15790 {
15791 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15792 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15793 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15794 default:
15795 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15796 }
15797 break;
15798
15799 default:
15800 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15801 }
15802 }
15803
15804 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15805}
15806
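/*
 * Minimal sketch of how an HM exit handler might forward a "REP OUTSB" style
 * exit here (illustrative only; real callers derive cbValue, the address
 * mode, the segment and cbInstr from the exit/VMCB information).
 */
#if 0
static VBOXSTRICTRC ExampleHandleOutsExit(PVMCPU pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu,
                                1 /* cbValue: byte sized access */,
                                IEMMODE_32BIT /* enmAddrMode */,
                                true /* fRepPrefix */,
                                cbInstr,
                                X86_SREG_DS /* iEffSeg */,
                                true /* fIoChecked: HM already validated the port access */);
}
#endif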
15807
15808/**
15809 * Interface for HM and EM for executing string I/O IN (read) instructions.
15810 *
15811 * This API ASSUMES that the caller has already verified that the guest code is
15812 * allowed to access the I/O port. (The I/O port is in the DX register in the
15813 * guest state.)
15814 *
15815 * @returns Strict VBox status code.
15816 * @param pVCpu The cross context virtual CPU structure.
15817 * @param cbValue The size of the I/O port access (1, 2, or 4).
15818 * @param enmAddrMode The addressing mode.
15819 * @param fRepPrefix Indicates whether a repeat prefix is used
15820 * (doesn't matter which for this instruction).
15821 * @param cbInstr The instruction length in bytes.
15822 * @param fIoChecked Whether the access to the I/O port has been
15823 * checked or not. It's typically checked in the
15824 * HM scenario.
15825 */
15826VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15827 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15828{
15829 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15830
15831 /*
15832 * State init.
15833 */
15834 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15835
15836 /*
15837 * Switch orgy for getting to the right handler.
15838 */
15839 VBOXSTRICTRC rcStrict;
15840 if (fRepPrefix)
15841 {
15842 switch (enmAddrMode)
15843 {
15844 case IEMMODE_16BIT:
15845 switch (cbValue)
15846 {
15847 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15848 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15849 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15850 default:
15851 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15852 }
15853 break;
15854
15855 case IEMMODE_32BIT:
15856 switch (cbValue)
15857 {
15858 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15859 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15860 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15861 default:
15862 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15863 }
15864 break;
15865
15866 case IEMMODE_64BIT:
15867 switch (cbValue)
15868 {
15869 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15870 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15871 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15872 default:
15873 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15874 }
15875 break;
15876
15877 default:
15878 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15879 }
15880 }
15881 else
15882 {
15883 switch (enmAddrMode)
15884 {
15885 case IEMMODE_16BIT:
15886 switch (cbValue)
15887 {
15888 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15889 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15890 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15891 default:
15892 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15893 }
15894 break;
15895
15896 case IEMMODE_32BIT:
15897 switch (cbValue)
15898 {
15899 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15900 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15901 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15902 default:
15903 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15904 }
15905 break;
15906
15907 case IEMMODE_64BIT:
15908 switch (cbValue)
15909 {
15910 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15911 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15912 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15913 default:
15914 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15915 }
15916 break;
15917
15918 default:
15919 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15920 }
15921 }
15922
15923 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15924}
15925
15926
15927/**
15928 * Interface for rawmode to execute an OUT (write) instruction.
15929 *
15930 * @returns Strict VBox status code.
15931 * @param pVCpu The cross context virtual CPU structure.
15932 * @param cbInstr The instruction length in bytes.
15933 * @param u16Port The port to write to.
15934 * @param cbReg The register size.
15935 *
15936 * @remarks In ring-0 not all of the state needs to be synced in.
15937 */
15938VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15939{
15940 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15941 Assert(cbReg <= 4 && cbReg != 3);
15942
15943 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15944 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
15945 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15946}
15947
15948
15949/**
15950 * Interface for rawmode to execute an IN (read) instruction.
15951 *
15952 * @returns Strict VBox status code.
15953 * @param pVCpu The cross context virtual CPU structure.
15954 * @param cbInstr The instruction length in bytes.
15955 * @param u16Port The port to read.
15956 * @param cbReg The register size.
15957 */
15958VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15959{
15960 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15961 Assert(cbReg <= 4 && cbReg != 3);
15962
15963 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15964 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
15965 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15966}
15967
15968
15969/**
15970 * Interface for HM and EM to write to a CRx register.
15971 *
15972 * @returns Strict VBox status code.
15973 * @param pVCpu The cross context virtual CPU structure.
15974 * @param cbInstr The instruction length in bytes.
15975 * @param iCrReg The control register number (destination).
15976 * @param iGReg The general purpose register number (source).
15977 *
15978 * @remarks In ring-0 not all of the state needs to be synced in.
15979 */
15980VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15981{
15982 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15983 Assert(iCrReg < 16);
15984 Assert(iGReg < 16);
15985
15986 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15987 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15988 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15989}
15990
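/*
 * Sketch: forwarding a decoded "mov cr3, <gpr>" intercept (illustrative only;
 * cbInstr and iGReg come from the caller's exit decoding).
 */
#if 0
static VBOXSTRICTRC ExampleHandleMovToCr3(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, 3 /* iCrReg = CR3 */, iGReg);
}
#endif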
15991
15992/**
15993 * Interface for HM and EM to read from a CRx register.
15994 *
15995 * @returns Strict VBox status code.
15996 * @param pVCpu The cross context virtual CPU structure.
15997 * @param cbInstr The instruction length in bytes.
15998 * @param iGReg The general purpose register number (destination).
15999 * @param iCrReg The control register number (source).
16000 *
16001 * @remarks In ring-0 not all of the state needs to be synced in.
16002 */
16003VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
16004{
16005 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
16006 Assert(iCrReg < 16);
16007 Assert(iGReg < 16);
16008
16009 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16010 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
16011 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16012}
16013
16014
16015/**
16016 * Interface for HM and EM to clear the CR0[TS] bit.
16017 *
16018 * @returns Strict VBox status code.
16019 * @param pVCpu The cross context virtual CPU structure.
16020 * @param cbInstr The instruction length in bytes.
16021 *
16022 * @remarks In ring-0 not all of the state needs to be synced in.
16023 */
16024VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
16025{
16026 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
16027
16028 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16029 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
16030 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16031}
16032
16033
16034/**
16035 * Interface for HM and EM to emulate the LMSW instruction (loads the machine status word into CR0).
16036 *
16037 * @returns Strict VBox status code.
16038 * @param pVCpu The cross context virtual CPU structure.
16039 * @param cbInstr The instruction length in bytes.
16040 * @param uValue The value to load into CR0.
16041 *
16042 * @remarks In ring-0 not all of the state needs to be synced in.
16043 */
16044VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
16045{
16046 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16047
16048 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16049 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
16050 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16051}
16052
16053
16054/**
16055 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
16056 *
16057 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
16058 *
16059 * @returns Strict VBox status code.
16060 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16061 * @param cbInstr The instruction length in bytes.
16062 * @remarks In ring-0 not all of the state needs to be synced in.
16063 * @thread EMT(pVCpu)
16064 */
16065VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
16066{
16067 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16068
16069 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16070 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
16071 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16072}
16073
16074
16075/**
16076 * Interface for HM and EM to emulate the INVLPG instruction.
16077 *
16078 * @param pVCpu The cross context virtual CPU structure.
16079 * @param cbInstr The instruction length in bytes.
16080 * @param GCPtrPage The effective address of the page to invalidate.
16081 *
16082 * @remarks In ring-0 not all of the state needs to be synced in.
16083 */
16084VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
16085{
16086 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16087
16088 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16089 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
16090 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16091}
16092
16093
16094/**
16095 * Interface for HM and EM to emulate the INVPCID instruction.
16096 *
16097 * @param pVCpu The cross context virtual CPU structure.
16098 * @param cbInstr The instruction length in bytes.
16099 * @param uType The invalidation type.
16100 * @param GCPtrInvpcidDesc The effective address of the INVPCID descriptor.
16101 *
16102 * @remarks In ring-0 not all of the state needs to be synced in.
16103 */
16104VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPU pVCpu, uint8_t cbInstr, uint8_t uType, RTGCPTR GCPtrInvpcidDesc)
16105{
16106 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
16107
16108 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16109 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_invpcid, uType, GCPtrInvpcidDesc);
16110 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16111}
16112
16113
16114/**
16115 * Checks if IEM is in the process of delivering an event (interrupt or
16116 * exception).
16117 *
16118 * @returns true if we're in the process of raising an interrupt or exception,
16119 * false otherwise.
16120 * @param pVCpu The cross context virtual CPU structure.
16121 * @param puVector Where to store the vector associated with the
16122 * currently delivered event, optional.
16123 * @param pfFlags Where to store the event delivery flags (see
16124 * IEM_XCPT_FLAGS_XXX), optional.
16125 * @param puErr Where to store the error code associated with the
16126 * event, optional.
16127 * @param puCr2 Where to store the CR2 associated with the event,
16128 * optional.
16129 * @remarks The caller should check the flags to determine if the error code and
16130 * CR2 are valid for the event.
16131 */
16132VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
16133{
16134 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
16135 if (fRaisingXcpt)
16136 {
16137 if (puVector)
16138 *puVector = pVCpu->iem.s.uCurXcpt;
16139 if (pfFlags)
16140 *pfFlags = pVCpu->iem.s.fCurXcpt;
16141 if (puErr)
16142 *puErr = pVCpu->iem.s.uCurXcptErr;
16143 if (puCr2)
16144 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
16145 }
16146 return fRaisingXcpt;
16147}
16148
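/*
 * Minimal usage sketch: checking whether IEM is in the middle of delivering a
 * page fault, e.g. when deciding how to handle a nested fault (illustrative
 * only; the Example* name is hypothetical).
 */
#if 0
static bool ExampleIsDeliveringPageFault(PVMCPU pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (!IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        return false;
    return uVector == X86_XCPT_PF && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
}
#endif
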
16149#ifdef VBOX_WITH_NESTED_HWVIRT
16150/**
16151 * Interface for HM and EM to emulate the CLGI instruction.
16152 *
16153 * @returns Strict VBox status code.
16154 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16155 * @param cbInstr The instruction length in bytes.
16156 * @thread EMT(pVCpu)
16157 */
16158VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
16159{
16160 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16161
16162 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16163 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
16164 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16165}
16166
16167
16168/**
16169 * Interface for HM and EM to emulate the STGI instruction.
16170 *
16171 * @returns Strict VBox status code.
16172 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16173 * @param cbInstr The instruction length in bytes.
16174 * @thread EMT(pVCpu)
16175 */
16176VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
16177{
16178 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16179
16180 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16181 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
16182 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16183}
16184
16185
16186/**
16187 * Interface for HM and EM to emulate the VMLOAD instruction.
16188 *
16189 * @returns Strict VBox status code.
16190 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16191 * @param cbInstr The instruction length in bytes.
16192 * @thread EMT(pVCpu)
16193 */
16194VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
16195{
16196 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16197
16198 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16199 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
16200 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16201}
16202
16203
16204/**
16205 * Interface for HM and EM to emulate the VMSAVE instruction.
16206 *
16207 * @returns Strict VBox status code.
16208 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16209 * @param cbInstr The instruction length in bytes.
16210 * @thread EMT(pVCpu)
16211 */
16212VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
16213{
16214 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16215
16216 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16217 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
16218 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16219}
16220
16221
16222/**
16223 * Interface for HM and EM to emulate the INVLPGA instruction.
16224 *
16225 * @returns Strict VBox status code.
16226 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16227 * @param cbInstr The instruction length in bytes.
16228 * @thread EMT(pVCpu)
16229 */
16230VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
16231{
16232 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16233
16234 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16235 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
16236 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16237}
16238
16239
16240/**
16241 * Interface for HM and EM to emulate the VMRUN instruction.
16242 *
16243 * @returns Strict VBox status code.
16244 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16245 * @param cbInstr The instruction length in bytes.
16246 * @thread EMT(pVCpu)
16247 */
16248VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
16249{
16250 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16251
16252 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16253 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
16254 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16255}
16256
16257
16258/**
16259 * Interface for HM and EM to emulate \#VMEXIT.
16260 *
16261 * @returns Strict VBox status code.
16262 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16263 * @param uExitCode The exit code.
16264 * @param uExitInfo1 The exit info. 1 field.
16265 * @param uExitInfo2 The exit info. 2 field.
16266 * @thread EMT(pVCpu)
16267 */
16268VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
16269{
16270 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, IEM_GET_CTX(pVCpu), uExitCode, uExitInfo1, uExitInfo2);
16271 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16272}
16273#endif /* VBOX_WITH_NESTED_HWVIRT */
16274
16275#ifdef IN_RING3
16276
16277/**
16278 * Handles the unlikely and probably fatal merge cases.
16279 *
16280 * @returns Merged status code.
16281 * @param rcStrict Current EM status code.
16282 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16283 * with @a rcStrict.
16284 * @param iMemMap The memory mapping index. For error reporting only.
16285 * @param pVCpu The cross context virtual CPU structure of the calling
16286 * thread, for error reporting only.
16287 */
16288DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
16289 unsigned iMemMap, PVMCPU pVCpu)
16290{
16291 if (RT_FAILURE_NP(rcStrict))
16292 return rcStrict;
16293
16294 if (RT_FAILURE_NP(rcStrictCommit))
16295 return rcStrictCommit;
16296
16297 if (rcStrict == rcStrictCommit)
16298 return rcStrictCommit;
16299
16300 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
16301 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
16302 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
16303 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
16304 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
16305 return VERR_IOM_FF_STATUS_IPE;
16306}
16307
16308
16309/**
16310 * Helper for IOMR3ProcessForceFlag.
16311 *
16312 * @returns Merged status code.
16313 * @param rcStrict Current EM status code.
16314 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16315 * with @a rcStrict.
16316 * @param iMemMap The memory mapping index. For error reporting only.
16317 * @param pVCpu The cross context virtual CPU structure of the calling
16318 * thread, for error reporting only.
16319 */
16320DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
16321{
16322 /* Simple. */
16323 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
16324 return rcStrictCommit;
16325
16326 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
16327 return rcStrict;
16328
16329 /* EM scheduling status codes. */
16330 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
16331 && rcStrict <= VINF_EM_LAST))
16332 {
16333 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
16334 && rcStrictCommit <= VINF_EM_LAST))
16335 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
16336 }
16337
16338 /* Unlikely */
16339 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
16340}
16341
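/*
 * Worked examples of the merge rules above (illustrative):
 *   - rcStrict == VINF_SUCCESS, rcStrictCommit == VINF_EM_RESCHEDULE -> VINF_EM_RESCHEDULE
 *   - rcStrict == VINF_EM_HALT, rcStrictCommit == VINF_SUCCESS       -> VINF_EM_HALT
 *   - both are EM scheduling codes                                   -> the numerically
 *     smaller (higher priority) of the two
 *   - any failure status on either side is sorted out by iemR3MergeStatusSlow.
 */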
16342
16343/**
16344 * Called by force-flag handling code when VMCPU_FF_IEM is set.
16345 *
16346 * @returns Merge between @a rcStrict and what the commit operation returned.
16347 * @param pVM The cross context VM structure.
16348 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16349 * @param rcStrict The status code returned by ring-0 or raw-mode.
16350 */
16351VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
16352{
16353 /*
16354 * Reset the pending commit.
16355 */
16356 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
16357 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
16358 ("%#x %#x %#x\n",
16359 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16360 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
16361
16362 /*
16363 * Commit the pending bounce buffers (usually just one).
16364 */
16365 unsigned cBufs = 0;
16366 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
16367 while (iMemMap-- > 0)
16368 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
16369 {
16370 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
16371 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
16372 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
16373
16374 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
16375 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
16376 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
16377
16378 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
16379 {
16380 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
16381 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16382 pbBuf,
16383 cbFirst,
16384 PGMACCESSORIGIN_IEM);
16385 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16386 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16387 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16388 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16389 }
16390
16391 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16392 {
16393 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16394 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16395 pbBuf + cbFirst,
16396 cbSecond,
16397 PGMACCESSORIGIN_IEM);
16398 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16399 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16400 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16401 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16402 }
16403 cBufs++;
16404 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16405 }
16406
16407 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16408 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16409 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16410 pVCpu->iem.s.cActiveMappings = 0;
16411 return rcStrict;
16412}
16413
16414#endif /* IN_RING3 */
16415