VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@72209

Last change on this file since 72209 was 72209, checked in by vboxsync, 7 years ago

VMM/IEM: VBOX_WITH_NESTED_HWVIRT_SVM.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 639.2 KB
1/* $Id: IEMAll.cpp 72209 2018-05-15 04:12:25Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
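/*
 * Illustrative sketch (not part of the original file) of how the level
 * conventions above translate into Log* statements; the function names,
 * messages and variables below are hypothetical:
 *
 *      Log(("iemRaiseXcptOrInt: #GP(0) at %04x:%RGv\n", uCs, GCPtrRip));       - level 1: exceptions
 *      LogFlow(("IEMExecOne: rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));   - flow: enter/exit info
 *      Log4(("decode - %04x:%RGv: xor eax, eax\n", uCs, GCPtrRip));            - level 4: mnemonics w/ EIP
 *      Log8(("IEM WR %RGp LB %#x\n", GCPhys, cbMem));                          - level 8: memory writes
 */
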
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
105# include <VBox/vmm/em.h>
106# include <VBox/vmm/hm_svm.h>
107#endif
108#include <VBox/vmm/tm.h>
109#include <VBox/vmm/dbgf.h>
110#include <VBox/vmm/dbgftrace.h>
111#ifdef VBOX_WITH_RAW_MODE_NOT_R0
112# include <VBox/vmm/patm.h>
113# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
114# include <VBox/vmm/csam.h>
115# endif
116#endif
117#include "IEMInternal.h"
118#ifdef IEM_VERIFICATION_MODE_FULL
119# include <VBox/vmm/rem.h>
120# include <VBox/vmm/mm.h>
121#endif
122#include <VBox/vmm/vm.h>
123#include <VBox/log.h>
124#include <VBox/err.h>
125#include <VBox/param.h>
126#include <VBox/dis.h>
127#include <VBox/disopcode.h>
128#include <iprt/assert.h>
129#include <iprt/string.h>
130#include <iprt/x86.h>
131
132
133/*********************************************************************************************************************************
134* Structures and Typedefs *
135*********************************************************************************************************************************/
136/** @typedef PFNIEMOP
137 * Pointer to an opcode decoder function.
138 */
139
140/** @def FNIEMOP_DEF
141 * Define an opcode decoder function.
142 *
143 * We're using macros for this so that adding and removing parameters as well as
144 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
145 *
146 * @param a_Name The function name.
147 */
148
149/** @typedef PFNIEMOPRM
150 * Pointer to an opcode decoder function with RM byte.
151 */
152
153/** @def FNIEMOPRM_DEF
154 * Define an opcode decoder function with RM byte.
155 *
156 * We're using macros for this so that adding and removing parameters as well as
157 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
158 *
159 * @param a_Name The function name.
160 */
161
162#if defined(__GNUC__) && defined(RT_ARCH_X86)
163typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
164typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
165# define FNIEMOP_DEF(a_Name) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
167# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
168 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
169# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
170 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
171
172#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
173typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
174typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
175# define FNIEMOP_DEF(a_Name) \
176 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
177# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
178 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
179# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
180 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
181
182#elif defined(__GNUC__)
183typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
184typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
185# define FNIEMOP_DEF(a_Name) \
186 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
187# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
188 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
189# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
190 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
191
192#else
193typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
194typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
195# define FNIEMOP_DEF(a_Name) \
196 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
197# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
198 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
199# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
200 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
201
202#endif
203#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
204
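/*
 * Illustrative sketch (not part of the original file): a hypothetical opcode
 * decoder defined with FNIEMOP_DEF and dispatched with FNIEMOP_CALL through
 * the one-byte opcode map declared further down:
 *
 *      FNIEMOP_DEF(iemOp_Example)              // iemOp_Example is made up
 *      {
 *          // decode ModR/M, immediates, etc., then emulate the instruction.
 *          return VINF_SUCCESS;
 *      }
 *
 *      VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[bOpcode]);
 */
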
205
206/**
207 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
208 */
209typedef union IEMSELDESC
210{
211 /** The legacy view. */
212 X86DESC Legacy;
213 /** The long mode view. */
214 X86DESC64 Long;
215} IEMSELDESC;
216/** Pointer to a selector descriptor table entry. */
217typedef IEMSELDESC *PIEMSELDESC;
218
219/**
220 * CPU exception classes.
221 */
222typedef enum IEMXCPTCLASS
223{
224 IEMXCPTCLASS_BENIGN,
225 IEMXCPTCLASS_CONTRIBUTORY,
226 IEMXCPTCLASS_PAGE_FAULT,
227 IEMXCPTCLASS_DOUBLE_FAULT
228} IEMXCPTCLASS;
229
230
231/*********************************************************************************************************************************
232* Defined Constants And Macros *
233*********************************************************************************************************************************/
234/** @def IEM_WITH_SETJMP
235 * Enables alternative status code handling using setjmps.
236 *
237 * This adds a bit of expense via the setjmp() call since it saves all the
238 * non-volatile registers. However, it eliminates return code checks and allows
239 * for more optimal return value passing (return regs instead of stack buffer).
240 */
241#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
242# define IEM_WITH_SETJMP
243#endif
244
245/** Temporary hack to disable the double execution. Will be removed in favor
246 * of a dedicated execution mode in EM. */
247//#define IEM_VERIFICATION_MODE_NO_REM
248
249/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
250 * due to GCC lacking knowledge about the value range of a switch. */
251#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
252
253/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
254#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
255
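/*
 * Illustrative sketch (not part of the original file): the default-case
 * macros above are meant for exhaustive switches in functions returning a
 * VBox status code, e.g. switching on the effective operand size:
 *
 *      switch (pVCpu->iem.s.enmEffOpSize)
 *      {
 *          case IEMMODE_16BIT: cbMem = 2; break;
 *          case IEMMODE_32BIT: cbMem = 4; break;
 *          case IEMMODE_64BIT: cbMem = 8; break;
 *          IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *      }
 */
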
256/**
257 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
258 * occasion.
259 */
260#ifdef LOG_ENABLED
261# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
262 do { \
263 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
264 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
265 } while (0)
266#else
267# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
268 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
269#endif
270
271/**
272 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
273 * occasion using the supplied logger statement.
274 *
275 * @param a_LoggerArgs What to log on failure.
276 */
277#ifdef LOG_ENABLED
278# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
279 do { \
280 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
281 /*LogFunc(a_LoggerArgs);*/ \
282 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
283 } while (0)
284#else
285# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
286 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
287#endif
288
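/*
 * Illustrative sketch (not part of the original file): typical use of the
 * not-implemented macros; the condition, format string and variables are
 * made up.
 *
 *      if (fUnsupportedCombination)
 *          IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("cr%u write, value %#RX64\n", iCrReg, uNewCrX));
 */
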
289/**
290 * Call an opcode decoder function.
291 *
292 * We're using macros for this so that adding and removing parameters can be
293 * done as we please. See FNIEMOP_DEF.
294 */
295#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
296
297/**
298 * Call a common opcode decoder function taking one extra argument.
299 *
300 * We're using macros for this so that adding and removing parameters can be
301 * done as we please. See FNIEMOP_DEF_1.
302 */
303#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
304
305/**
306 * Call a common opcode decoder function taking two extra arguments.
307 *
308 * We're using macros for this so that adding and removing parameters can be
309 * done as we please. See FNIEMOP_DEF_2.
310 */
311#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
312
313/**
314 * Check if we're currently executing in real or virtual 8086 mode.
315 *
316 * @returns @c true if it is, @c false if not.
317 * @param a_pVCpu The IEM state of the current CPU.
318 */
319#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
320
321/**
322 * Check if we're currently executing in virtual 8086 mode.
323 *
324 * @returns @c true if it is, @c false if not.
325 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
326 */
327#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
328
329/**
330 * Check if we're currently executing in long mode.
331 *
332 * @returns @c true if it is, @c false if not.
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
336
337/**
338 * Check if we're currently executing in real mode.
339 *
340 * @returns @c true if it is, @c false if not.
341 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
342 */
343#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
344
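/*
 * Illustrative sketch (not part of the original file): the mode predicates
 * above are typically used to reject instructions that are invalid outside
 * protected mode, e.g.:
 *
 *      if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
 *          return iemRaiseUndefinedOpcode(pVCpu);
 */
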
345/**
346 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
347 * @returns PCCPUMFEATURES
348 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
349 */
350#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
351
352/**
353 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
354 * @returns PCCPUMFEATURES
355 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
356 */
357#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
358
359/**
360 * Evaluates to true if we're presenting an Intel CPU to the guest.
361 */
362#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
363
364/**
365 * Evaluates to true if we're presenting an AMD CPU to the guest.
366 */
367#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
368
369/**
370 * Check if the address is canonical.
371 */
372#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
373
374/**
375 * Gets the effective VEX.VVVV value.
376 *
377 * The 4th bit is ignored if not 64-bit code.
378 * @returns effective V-register value.
379 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
380 */
381#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
382 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
383
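/*
 * Worked example (illustrative, not part of the original file): with
 * VEX.VVVV = 0xC (1100b), IEM_GET_EFFECTIVE_VVVV yields 12 in 64-bit mode
 * but 4 (1100b & 0111b) elsewhere, since bit 3 must be ignored outside
 * 64-bit code.
 */
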
384/** @def IEM_USE_UNALIGNED_DATA_ACCESS
385 * Use unaligned accesses instead of elaborate byte assembly. */
386#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
387# define IEM_USE_UNALIGNED_DATA_ACCESS
388#endif
389
390#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
391/**
392 * Check the common SVM instruction preconditions.
393 */
394# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
395 do { \
396 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
397 { \
398 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
399 return iemRaiseUndefinedOpcode(pVCpu); \
400 } \
401 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
402 { \
403 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
404 return iemRaiseUndefinedOpcode(pVCpu); \
405 } \
406 if (pVCpu->iem.s.uCpl != 0) \
407 { \
408 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
409 return iemRaiseGeneralProtectionFault0(pVCpu); \
410 } \
411 } while (0)
412
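/*
 * Illustrative sketch (not part of the original file): the SVM instruction
 * implementations (VMRUN, VMLOAD, VMSAVE, ...) are expected to open with
 * something like:
 *
 *      IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload);
 *      ...
 *      IEM_SVM_UPDATE_NRIP(pVCpu);
 */
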
413/**
414 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
415 */
416# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
417 do { \
418 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
419 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
420 } while (0)
421
422/**
423 * Check if SVM is enabled.
424 */
425# define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
426
427/**
428 * Check if an SVM control/instruction intercept is set.
429 */
430# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
431
432/**
433 * Check if an SVM read CRx intercept is set.
434 */
435# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
436
437/**
438 * Check if an SVM write CRx intercept is set.
439 */
440# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
441
442/**
443 * Check if an SVM read DRx intercept is set.
444 */
445# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
446
447/**
448 * Check if an SVM write DRx intercept is set.
449 */
450# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
451
452/**
453 * Check if an SVM exception intercept is set.
454 */
455# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
456
457/**
458 * Get the SVM pause-filter count.
459 */
460# define IEM_GET_SVM_PAUSE_FILTER_COUNT(a_pVCpu) (CPUMGetGuestSvmPauseFilterCount(a_pVCpu, IEM_GET_CTX(a_pVCpu)))
461
462/**
463 * Invokes the SVM \#VMEXIT handler for the nested-guest.
464 */
465# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
466 do \
467 { \
468 return iemSvmVmexit((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); \
469 } while (0)
470
471/**
472 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
473 * corresponding decode assist information.
474 */
475# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
476 do \
477 { \
478 uint64_t uExitInfo1; \
479 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
480 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
481 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
482 else \
483 uExitInfo1 = 0; \
484 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
485 } while (0)
486
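/*
 * Illustrative sketch (not part of the original file): raising a nested-guest
 * #VMEXIT when a control intercept is active, assuming the usual
 * SVM_CTRL_INTERCEPT_VMLOAD / SVM_EXIT_VMLOAD names from the SVM headers:
 *
 *      if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
 *          IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_VMLOAD, 0, 0);
 */
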
487#else
488# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) do { } while (0)
489# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
490# define IEM_IS_SVM_ENABLED(a_pVCpu) (false)
491# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
492# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
493# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
494# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
495# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
496# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
497# define IEM_GET_SVM_PAUSE_FILTER_COUNT(a_pVCpu) (0)
498# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
499# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
500
501#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
502
503
504/*********************************************************************************************************************************
505* Global Variables *
506*********************************************************************************************************************************/
507extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
508
509
510/** Function table for the ADD instruction. */
511IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
512{
513 iemAImpl_add_u8, iemAImpl_add_u8_locked,
514 iemAImpl_add_u16, iemAImpl_add_u16_locked,
515 iemAImpl_add_u32, iemAImpl_add_u32_locked,
516 iemAImpl_add_u64, iemAImpl_add_u64_locked
517};
518
519/** Function table for the ADC instruction. */
520IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
521{
522 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
523 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
524 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
525 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
526};
527
528/** Function table for the SUB instruction. */
529IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
530{
531 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
532 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
533 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
534 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
535};
536
537/** Function table for the SBB instruction. */
538IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
539{
540 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
541 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
542 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
543 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
544};
545
546/** Function table for the OR instruction. */
547IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
548{
549 iemAImpl_or_u8, iemAImpl_or_u8_locked,
550 iemAImpl_or_u16, iemAImpl_or_u16_locked,
551 iemAImpl_or_u32, iemAImpl_or_u32_locked,
552 iemAImpl_or_u64, iemAImpl_or_u64_locked
553};
554
555/** Function table for the XOR instruction. */
556IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
557{
558 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
559 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
560 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
561 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
562};
563
564/** Function table for the AND instruction. */
565IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
566{
567 iemAImpl_and_u8, iemAImpl_and_u8_locked,
568 iemAImpl_and_u16, iemAImpl_and_u16_locked,
569 iemAImpl_and_u32, iemAImpl_and_u32_locked,
570 iemAImpl_and_u64, iemAImpl_and_u64_locked
571};
572
573/** Function table for the CMP instruction.
574 * @remarks Making operand order ASSUMPTIONS.
575 */
576IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
577{
578 iemAImpl_cmp_u8, NULL,
579 iemAImpl_cmp_u16, NULL,
580 iemAImpl_cmp_u32, NULL,
581 iemAImpl_cmp_u64, NULL
582};
583
584/** Function table for the TEST instruction.
585 * @remarks Making operand order ASSUMPTIONS.
586 */
587IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
588{
589 iemAImpl_test_u8, NULL,
590 iemAImpl_test_u16, NULL,
591 iemAImpl_test_u32, NULL,
592 iemAImpl_test_u64, NULL
593};
594
595/** Function table for the BT instruction. */
596IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
597{
598 NULL, NULL,
599 iemAImpl_bt_u16, NULL,
600 iemAImpl_bt_u32, NULL,
601 iemAImpl_bt_u64, NULL
602};
603
604/** Function table for the BTC instruction. */
605IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
606{
607 NULL, NULL,
608 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
609 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
610 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
611};
612
613/** Function table for the BTR instruction. */
614IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
615{
616 NULL, NULL,
617 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
618 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
619 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
620};
621
622/** Function table for the BTS instruction. */
623IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
624{
625 NULL, NULL,
626 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
627 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
628 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
629};
630
631/** Function table for the BSF instruction. */
632IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
633{
634 NULL, NULL,
635 iemAImpl_bsf_u16, NULL,
636 iemAImpl_bsf_u32, NULL,
637 iemAImpl_bsf_u64, NULL
638};
639
640/** Function table for the BSR instruction. */
641IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
642{
643 NULL, NULL,
644 iemAImpl_bsr_u16, NULL,
645 iemAImpl_bsr_u32, NULL,
646 iemAImpl_bsr_u64, NULL
647};
648
649/** Function table for the IMUL instruction. */
650IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
651{
652 NULL, NULL,
653 iemAImpl_imul_two_u16, NULL,
654 iemAImpl_imul_two_u32, NULL,
655 iemAImpl_imul_two_u64, NULL
656};
657
658/** Group 1 /r lookup table. */
659IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
660{
661 &g_iemAImpl_add,
662 &g_iemAImpl_or,
663 &g_iemAImpl_adc,
664 &g_iemAImpl_sbb,
665 &g_iemAImpl_and,
666 &g_iemAImpl_sub,
667 &g_iemAImpl_xor,
668 &g_iemAImpl_cmp
669};
670
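/*
 * Illustrative sketch (not part of the original file): group 1 dispatch
 * (opcodes 0x80..0x83) picks the implementation from the ModR/M reg field:
 *
 *      PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
 */
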
671/** Function table for the INC instruction. */
672IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
673{
674 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
675 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
676 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
677 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
678};
679
680/** Function table for the DEC instruction. */
681IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
682{
683 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
684 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
685 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
686 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
687};
688
689/** Function table for the NEG instruction. */
690IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
691{
692 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
693 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
694 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
695 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
696};
697
698/** Function table for the NOT instruction. */
699IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
700{
701 iemAImpl_not_u8, iemAImpl_not_u8_locked,
702 iemAImpl_not_u16, iemAImpl_not_u16_locked,
703 iemAImpl_not_u32, iemAImpl_not_u32_locked,
704 iemAImpl_not_u64, iemAImpl_not_u64_locked
705};
706
707
708/** Function table for the ROL instruction. */
709IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
710{
711 iemAImpl_rol_u8,
712 iemAImpl_rol_u16,
713 iemAImpl_rol_u32,
714 iemAImpl_rol_u64
715};
716
717/** Function table for the ROR instruction. */
718IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
719{
720 iemAImpl_ror_u8,
721 iemAImpl_ror_u16,
722 iemAImpl_ror_u32,
723 iemAImpl_ror_u64
724};
725
726/** Function table for the RCL instruction. */
727IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
728{
729 iemAImpl_rcl_u8,
730 iemAImpl_rcl_u16,
731 iemAImpl_rcl_u32,
732 iemAImpl_rcl_u64
733};
734
735/** Function table for the RCR instruction. */
736IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
737{
738 iemAImpl_rcr_u8,
739 iemAImpl_rcr_u16,
740 iemAImpl_rcr_u32,
741 iemAImpl_rcr_u64
742};
743
744/** Function table for the SHL instruction. */
745IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
746{
747 iemAImpl_shl_u8,
748 iemAImpl_shl_u16,
749 iemAImpl_shl_u32,
750 iemAImpl_shl_u64
751};
752
753/** Function table for the SHR instruction. */
754IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
755{
756 iemAImpl_shr_u8,
757 iemAImpl_shr_u16,
758 iemAImpl_shr_u32,
759 iemAImpl_shr_u64
760};
761
762/** Function table for the SAR instruction. */
763IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
764{
765 iemAImpl_sar_u8,
766 iemAImpl_sar_u16,
767 iemAImpl_sar_u32,
768 iemAImpl_sar_u64
769};
770
771
772/** Function table for the MUL instruction. */
773IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
774{
775 iemAImpl_mul_u8,
776 iemAImpl_mul_u16,
777 iemAImpl_mul_u32,
778 iemAImpl_mul_u64
779};
780
781/** Function table for the IMUL instruction working implicitly on rAX. */
782IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
783{
784 iemAImpl_imul_u8,
785 iemAImpl_imul_u16,
786 iemAImpl_imul_u32,
787 iemAImpl_imul_u64
788};
789
790/** Function table for the DIV instruction. */
791IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
792{
793 iemAImpl_div_u8,
794 iemAImpl_div_u16,
795 iemAImpl_div_u32,
796 iemAImpl_div_u64
797};
798
799/** Function table for the IDIV instruction. */
800IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
801{
802 iemAImpl_idiv_u8,
803 iemAImpl_idiv_u16,
804 iemAImpl_idiv_u32,
805 iemAImpl_idiv_u64
806};
807
808/** Function table for the SHLD instruction */
809IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
810{
811 iemAImpl_shld_u16,
812 iemAImpl_shld_u32,
813 iemAImpl_shld_u64,
814};
815
816/** Function table for the SHRD instruction */
817IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
818{
819 iemAImpl_shrd_u16,
820 iemAImpl_shrd_u32,
821 iemAImpl_shrd_u64,
822};
823
824
825/** Function table for the PUNPCKLBW instruction */
826IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
827/** Function table for the PUNPCKLWD instruction */
828IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
829/** Function table for the PUNPCKLDQ instruction */
830IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
831/** Function table for the PUNPCKLQDQ instruction */
832IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
833
834/** Function table for the PUNPCKHBW instruction */
835IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
836/** Function table for the PUNPCKHWD instruction */
837IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
838/** Function table for the PUNPCKHDQ instruction */
839IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
840/** Function table for the PUNPCKHQDQ instruction */
841IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
842
843/** Function table for the PXOR instruction */
844IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
845/** Function table for the PCMPEQB instruction */
846IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
847/** Function table for the PCMPEQW instruction */
848IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
849/** Function table for the PCMPEQD instruction */
850IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
851
852
853#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
854/** What IEM just wrote. */
855uint8_t g_abIemWrote[256];
856/** How much IEM just wrote. */
857size_t g_cbIemWrote;
858#endif
859
860
861/*********************************************************************************************************************************
862* Internal Functions *
863*********************************************************************************************************************************/
864IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
865IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
866IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
867IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
868/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
869IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
870IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
871IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
872IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
873IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
874IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
875IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
876IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
877IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
878IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
879IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
880IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
881#ifdef IEM_WITH_SETJMP
882DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
883DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
884DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
885DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
886DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
887#endif
888
889IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
890IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
891IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
892IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
893IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
894IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
895IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
896IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
897IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
898IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
899IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
900IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
901IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
902IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
903IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
904IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
905IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
906
907#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
908IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
909#endif
910IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
911IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
912
913#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
914IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uExitCode, uint64_t uExitInfo1,
915 uint64_t uExitInfo2);
916IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t u8Vector, uint32_t fFlags,
917 uint32_t uErr, uint64_t uCr2);
918#endif
919
920/**
921 * Sets the pass up status.
922 *
923 * @returns VINF_SUCCESS.
924 * @param pVCpu The cross context virtual CPU structure of the
925 * calling thread.
926 * @param rcPassUp The pass up status. Must be informational.
927 * VINF_SUCCESS is not allowed.
928 */
929IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
930{
931 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
932
933 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
934 if (rcOldPassUp == VINF_SUCCESS)
935 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
936 /* If both are EM scheduling codes, use EM priority rules. */
937 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
938 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
939 {
940 if (rcPassUp < rcOldPassUp)
941 {
942 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
943 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
944 }
945 else
946 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
947 }
948 /* Override EM scheduling with specific status code. */
949 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
950 {
951 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
952 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
953 }
954 /* Don't override specific status code, first come first served. */
955 else
956 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
957 return VINF_SUCCESS;
958}
959
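/*
 * Worked example (illustrative, not part of the original file), assuming
 * VINF_EM_RESET has a numerically lower - i.e. higher priority - value than
 * VINF_EM_RESCHEDULE: passing up VINF_EM_RESCHEDULE and then VINF_EM_RESET
 * leaves rcPassUp = VINF_EM_RESET, and the reverse order keeps VINF_EM_RESET
 * as well, since the later, lower-priority code does not overwrite it.
 */
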
960
961/**
962 * Calculates the CPU mode.
963 *
964 * This is mainly for updating IEMCPU::enmCpuMode.
965 *
966 * @returns CPU mode.
967 * @param pCtx The register context for the CPU.
968 */
969DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
970{
971 if (CPUMIsGuestIn64BitCodeEx(pCtx))
972 return IEMMODE_64BIT;
973 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
974 return IEMMODE_32BIT;
975 return IEMMODE_16BIT;
976}
977
978
979/**
980 * Initializes the execution state.
981 *
982 * @param pVCpu The cross context virtual CPU structure of the
983 * calling thread.
984 * @param fBypassHandlers Whether to bypass access handlers.
985 *
986 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
987 * side-effects in strict builds.
988 */
989DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
990{
991 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
992
993 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
994
995#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
996 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
997 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
998 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
999 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1000 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1001 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1002 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1003 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1004#endif
1005
1006#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1007 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1008#endif
1009 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1010 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
1011#ifdef VBOX_STRICT
1012 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1013 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1014 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1015 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1016 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1017 pVCpu->iem.s.uRexReg = 127;
1018 pVCpu->iem.s.uRexB = 127;
1019 pVCpu->iem.s.uRexIndex = 127;
1020 pVCpu->iem.s.iEffSeg = 127;
1021 pVCpu->iem.s.idxPrefix = 127;
1022 pVCpu->iem.s.uVex3rdReg = 127;
1023 pVCpu->iem.s.uVexLength = 127;
1024 pVCpu->iem.s.fEvexStuff = 127;
1025 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1026# ifdef IEM_WITH_CODE_TLB
1027 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1028 pVCpu->iem.s.pbInstrBuf = NULL;
1029 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1030 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1031 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1032 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1033# else
1034 pVCpu->iem.s.offOpcode = 127;
1035 pVCpu->iem.s.cbOpcode = 127;
1036# endif
1037#endif
1038
1039 pVCpu->iem.s.cActiveMappings = 0;
1040 pVCpu->iem.s.iNextMapping = 0;
1041 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1042 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1043#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1044 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1045 && pCtx->cs.u64Base == 0
1046 && pCtx->cs.u32Limit == UINT32_MAX
1047 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1048 if (!pVCpu->iem.s.fInPatchCode)
1049 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1050#endif
1051
1052#ifdef IEM_VERIFICATION_MODE_FULL
1053 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
1054 pVCpu->iem.s.fNoRem = true;
1055#endif
1056}
1057
1058#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1059/**
1060 * Performs a minimal reinitialization of the execution state.
1061 *
1062 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1063 * 'world-switch' type operations on the CPU. Currently only nested
1064 * hardware-virtualization uses it.
1065 *
1066 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1067 */
1068IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1069{
1070 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1071 IEMMODE const enmMode = iemCalcCpuMode(pCtx);
1072 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1073
1074 pVCpu->iem.s.uCpl = uCpl;
1075 pVCpu->iem.s.enmCpuMode = enmMode;
1076 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1077 pVCpu->iem.s.enmEffAddrMode = enmMode;
1078 if (enmMode != IEMMODE_64BIT)
1079 {
1080 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1081 pVCpu->iem.s.enmEffOpSize = enmMode;
1082 }
1083 else
1084 {
1085 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1086 pVCpu->iem.s.enmEffOpSize = enmMode;
1087 }
1088 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1089#ifndef IEM_WITH_CODE_TLB
1090 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1091 pVCpu->iem.s.offOpcode = 0;
1092 pVCpu->iem.s.cbOpcode = 0;
1093#endif
1094}
1095#endif
1096
1097/**
1098 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1099 *
1100 * @param pVCpu The cross context virtual CPU structure of the
1101 * calling thread.
1102 */
1103DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1104{
1105 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1106#ifdef IEM_VERIFICATION_MODE_FULL
1107 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
1108#endif
1109#ifdef VBOX_STRICT
1110# ifdef IEM_WITH_CODE_TLB
1111 NOREF(pVCpu);
1112# else
1113 pVCpu->iem.s.cbOpcode = 0;
1114# endif
1115#else
1116 NOREF(pVCpu);
1117#endif
1118}
1119
1120
1121/**
1122 * Initializes the decoder state.
1123 *
1124 * iemReInitDecoder is mostly a copy of this function.
1125 *
1126 * @param pVCpu The cross context virtual CPU structure of the
1127 * calling thread.
1128 * @param fBypassHandlers Whether to bypass access handlers.
1129 */
1130DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1131{
1132 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1133
1134 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1135
1136#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1137 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1138 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1139 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1140 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1141 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1142 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1143 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1144 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1145#endif
1146
1147#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1148 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1149#endif
1150 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1151#ifdef IEM_VERIFICATION_MODE_FULL
1152 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1153 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1154#endif
1155 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1156 pVCpu->iem.s.enmCpuMode = enmMode;
1157 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1158 pVCpu->iem.s.enmEffAddrMode = enmMode;
1159 if (enmMode != IEMMODE_64BIT)
1160 {
1161 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1162 pVCpu->iem.s.enmEffOpSize = enmMode;
1163 }
1164 else
1165 {
1166 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1167 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1168 }
1169 pVCpu->iem.s.fPrefixes = 0;
1170 pVCpu->iem.s.uRexReg = 0;
1171 pVCpu->iem.s.uRexB = 0;
1172 pVCpu->iem.s.uRexIndex = 0;
1173 pVCpu->iem.s.idxPrefix = 0;
1174 pVCpu->iem.s.uVex3rdReg = 0;
1175 pVCpu->iem.s.uVexLength = 0;
1176 pVCpu->iem.s.fEvexStuff = 0;
1177 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1178#ifdef IEM_WITH_CODE_TLB
1179 pVCpu->iem.s.pbInstrBuf = NULL;
1180 pVCpu->iem.s.offInstrNextByte = 0;
1181 pVCpu->iem.s.offCurInstrStart = 0;
1182# ifdef VBOX_STRICT
1183 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1184 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1185 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1186# endif
1187#else
1188 pVCpu->iem.s.offOpcode = 0;
1189 pVCpu->iem.s.cbOpcode = 0;
1190#endif
1191 pVCpu->iem.s.cActiveMappings = 0;
1192 pVCpu->iem.s.iNextMapping = 0;
1193 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1194 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1195#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1196 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1197 && pCtx->cs.u64Base == 0
1198 && pCtx->cs.u32Limit == UINT32_MAX
1199 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1200 if (!pVCpu->iem.s.fInPatchCode)
1201 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1202#endif
1203
1204#ifdef DBGFTRACE_ENABLED
1205 switch (enmMode)
1206 {
1207 case IEMMODE_64BIT:
1208 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1209 break;
1210 case IEMMODE_32BIT:
1211 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1212 break;
1213 case IEMMODE_16BIT:
1214 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1215 break;
1216 }
1217#endif
1218}
1219
1220
1221/**
1222 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
1223 *
1224 * This is mostly a copy of iemInitDecoder.
1225 *
1226 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1227 */
1228DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1229{
1230 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1231
1232 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1233
1234#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1235 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1236 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1237 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1238 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1239 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1240 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1241 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1242 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1243#endif
1244
1245 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1246#ifdef IEM_VERIFICATION_MODE_FULL
1247 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1248 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1249#endif
1250 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1251 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1252 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1253 pVCpu->iem.s.enmEffAddrMode = enmMode;
1254 if (enmMode != IEMMODE_64BIT)
1255 {
1256 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1257 pVCpu->iem.s.enmEffOpSize = enmMode;
1258 }
1259 else
1260 {
1261 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1262 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1263 }
1264 pVCpu->iem.s.fPrefixes = 0;
1265 pVCpu->iem.s.uRexReg = 0;
1266 pVCpu->iem.s.uRexB = 0;
1267 pVCpu->iem.s.uRexIndex = 0;
1268 pVCpu->iem.s.idxPrefix = 0;
1269 pVCpu->iem.s.uVex3rdReg = 0;
1270 pVCpu->iem.s.uVexLength = 0;
1271 pVCpu->iem.s.fEvexStuff = 0;
1272 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1273#ifdef IEM_WITH_CODE_TLB
1274 if (pVCpu->iem.s.pbInstrBuf)
1275 {
1276 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1277 - pVCpu->iem.s.uInstrBufPc;
1278 if (off < pVCpu->iem.s.cbInstrBufTotal)
1279 {
1280 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1281 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1282 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1283 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1284 else
1285 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1286 }
1287 else
1288 {
1289 pVCpu->iem.s.pbInstrBuf = NULL;
1290 pVCpu->iem.s.offInstrNextByte = 0;
1291 pVCpu->iem.s.offCurInstrStart = 0;
1292 pVCpu->iem.s.cbInstrBuf = 0;
1293 pVCpu->iem.s.cbInstrBufTotal = 0;
1294 }
1295 }
1296 else
1297 {
1298 pVCpu->iem.s.offInstrNextByte = 0;
1299 pVCpu->iem.s.offCurInstrStart = 0;
1300 pVCpu->iem.s.cbInstrBuf = 0;
1301 pVCpu->iem.s.cbInstrBufTotal = 0;
1302 }
1303#else
1304 pVCpu->iem.s.cbOpcode = 0;
1305 pVCpu->iem.s.offOpcode = 0;
1306#endif
1307 Assert(pVCpu->iem.s.cActiveMappings == 0);
1308 pVCpu->iem.s.iNextMapping = 0;
1309 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1310 Assert(pVCpu->iem.s.fBypassHandlers == false);
1311#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1312 if (!pVCpu->iem.s.fInPatchCode)
1313 { /* likely */ }
1314 else
1315 {
1316 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1317 && pCtx->cs.u64Base == 0
1318 && pCtx->cs.u32Limit == UINT32_MAX
1319 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1320 if (!pVCpu->iem.s.fInPatchCode)
1321 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1322 }
1323#endif
1324
1325#ifdef DBGFTRACE_ENABLED
1326 switch (enmMode)
1327 {
1328 case IEMMODE_64BIT:
1329 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1330 break;
1331 case IEMMODE_32BIT:
1332 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1333 break;
1334 case IEMMODE_16BIT:
1335 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1336 break;
1337 }
1338#endif
1339}
1340
1341
1342
1343/**
1344 * Prefetches opcodes the first time, when starting execution.
1345 *
1346 * @returns Strict VBox status code.
1347 * @param pVCpu The cross context virtual CPU structure of the
1348 * calling thread.
1349 * @param fBypassHandlers Whether to bypass access handlers.
1350 */
1351IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1352{
1353#ifdef IEM_VERIFICATION_MODE_FULL
1354 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1355#endif
1356 iemInitDecoder(pVCpu, fBypassHandlers);
1357
1358#ifdef IEM_WITH_CODE_TLB
1359 /** @todo Do ITLB lookup here. */
1360
1361#else /* !IEM_WITH_CODE_TLB */
1362
1363 /*
1364 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1365 *
1366 * First translate CS:rIP to a physical address.
1367 */
1368 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1369 uint32_t cbToTryRead;
1370 RTGCPTR GCPtrPC;
1371 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1372 {
1373 cbToTryRead = PAGE_SIZE;
1374 GCPtrPC = pCtx->rip;
1375 if (IEM_IS_CANONICAL(GCPtrPC))
1376 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1377 else
1378 return iemRaiseGeneralProtectionFault0(pVCpu);
1379 }
1380 else
1381 {
1382 uint32_t GCPtrPC32 = pCtx->eip;
1383 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1384 if (GCPtrPC32 <= pCtx->cs.u32Limit)
1385 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1386 else
1387 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1388 if (cbToTryRead) { /* likely */ }
1389 else /* overflowed */
1390 {
1391 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1392 cbToTryRead = UINT32_MAX;
1393 }
1394 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1395 Assert(GCPtrPC <= UINT32_MAX);
1396 }
1397
1398# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1399 /* Allow interpretation of patch manager code blocks since they can for
1400 instance throw #PFs for perfectly good reasons. */
1401 if (pVCpu->iem.s.fInPatchCode)
1402 {
1403 size_t cbRead = 0;
1404 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1405 AssertRCReturn(rc, rc);
1406 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1407 return VINF_SUCCESS;
1408 }
1409# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1410
1411 RTGCPHYS GCPhys;
1412 uint64_t fFlags;
1413 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1414 if (RT_SUCCESS(rc)) { /* probable */ }
1415 else
1416 {
1417 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1418 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1419 }
1420 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1421 else
1422 {
1423 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1424 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1425 }
1426 if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1427 else
1428 {
1429 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1430 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1431 }
1432 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1433 /** @todo Check reserved bits and such stuff. PGM is better at doing
1434 * that, so do it when implementing the guest virtual address
1435 * TLB... */
1436
1437# ifdef IEM_VERIFICATION_MODE_FULL
1438 /*
1439 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1440 * instruction.
1441 */
1442 /** @todo optimize this differently by not using PGMPhysRead. */
1443 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1444 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1445 if ( offPrevOpcodes < cbOldOpcodes
1446 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1447 {
1448 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1449 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1450 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1451 pVCpu->iem.s.cbOpcode = cbNew;
1452 return VINF_SUCCESS;
1453 }
1454# endif
1455
1456 /*
1457 * Read the bytes at this address.
1458 */
1459 PVM pVM = pVCpu->CTX_SUFF(pVM);
1460# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1461 size_t cbActual;
1462 if ( PATMIsEnabled(pVM)
1463 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1464 {
1465 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1466 Assert(cbActual > 0);
1467 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1468 }
1469 else
1470# endif
1471 {
1472 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1473 if (cbToTryRead > cbLeftOnPage)
1474 cbToTryRead = cbLeftOnPage;
1475 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1476 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1477
1478 if (!pVCpu->iem.s.fBypassHandlers)
1479 {
1480 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1481 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1482 { /* likely */ }
1483 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1484 {
1485 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1486 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1487 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1488 }
1489 else
1490 {
1491 Log((RT_SUCCESS(rcStrict)
1492 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1493 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1494 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1495 return rcStrict;
1496 }
1497 }
1498 else
1499 {
1500 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1501 if (RT_SUCCESS(rc))
1502 { /* likely */ }
1503 else
1504 {
1505 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1506 GCPtrPC, GCPhys, cbToTryRead, rc));
1507 return rc;
1508 }
1509 }
1510 pVCpu->iem.s.cbOpcode = cbToTryRead;
1511 }
1512#endif /* !IEM_WITH_CODE_TLB */
1513 return VINF_SUCCESS;
1514}
1515
1516
1517/**
1518 * Invalidates the IEM TLBs.
1519 *
1520 * This is called internally as well as by PGM when moving GC mappings.
1521 *
1523 * @param pVCpu The cross context virtual CPU structure of the calling
1524 * thread.
1525 * @param fVmm Set when PGM calls us with a remapping.
1526 */
1527VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1528{
1529#ifdef IEM_WITH_CODE_TLB
1530 pVCpu->iem.s.cbInstrBufTotal = 0;
1531 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1532 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1533 { /* very likely */ }
1534 else
1535 {
1536 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1537 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1538 while (i-- > 0)
1539 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1540 }
1541#endif
1542
1543#ifdef IEM_WITH_DATA_TLB
1544 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1545 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1546 { /* very likely */ }
1547 else
1548 {
1549 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1550 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1551 while (i-- > 0)
1552 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1553 }
1554#endif
1555 NOREF(pVCpu); NOREF(fVmm);
1556}
1557
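/*
 * Illustrative sketch (not part of the build): the "invalidate all" above
 * works by bumping a revision counter that is folded into every entry's
 * uTag, so stale entries simply stop matching; the 256 entries are only
 * scrubbed on the rare occasion the counter wraps to zero.  The types and
 * constants below are simplified stand-ins, not the real IEMTLB layout,
 * and assume the page number fits below the revision bits.
 */
#if 0
typedef struct SKETCHTLBENTRY { uint64_t uTag; } SKETCHTLBENTRY;
typedef struct SKETCHTLB      { uint64_t uRevision; SKETCHTLBENTRY aEntries[256]; } SKETCHTLB;
#define SKETCH_REV_INCR (UINT64_C(1) << 36)   /* revision lives above the page number bits */

static void sketchTlbInvalidateAll(SKETCHTLB *pTlb)
{
    pTlb->uRevision += SKETCH_REV_INCR;       /* O(1): no stored tag carries the new revision */
    if (pTlb->uRevision == 0)                 /* wrapped: old tags could match again, so scrub */
    {
        pTlb->uRevision = SKETCH_REV_INCR;
        for (unsigned i = 0; i < 256; i++)
            pTlb->aEntries[i].uTag = 0;       /* zero never matches a live (revision-carrying) tag */
    }
}
#endif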
1558
1559/**
1560 * Invalidates a page in the TLBs.
1561 *
1562 * @param pVCpu The cross context virtual CPU structure of the calling
1563 * thread.
1564 * @param GCPtr The address of the page to invalidate.
1565 */
1566VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1567{
1568#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1569 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1570 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1571 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1572 uintptr_t idx = (uint8_t)GCPtr;
1573
1574# ifdef IEM_WITH_CODE_TLB
1575 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1576 {
1577 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1578 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1579 pVCpu->iem.s.cbInstrBufTotal = 0;
1580 }
1581# endif
1582
1583# ifdef IEM_WITH_DATA_TLB
1584 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1585 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1586# endif
1587#else
1588 NOREF(pVCpu); NOREF(GCPtr);
1589#endif
1590}
1591
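/*
 * Illustrative sketch (not part of the build) of the direct-mapped lookup
 * used by the invalidation above: the low 8 bits of the page number pick
 * one of the 256 entries, and the full page number together with the
 * current revision forms the tag that must match.  This reuses the
 * simplified stand-in SKETCHTLB type from the sketch after
 * IEMTlbInvalidateAll and assumes 4 KiB pages.
 */
#if 0
static int sketchTlbInvalidatePage(SKETCHTLB *pTlb, uint64_t GCPtr)
{
    uint64_t const uPage = GCPtr >> 12;                 /* page number */
    unsigned const idx   = (uint8_t)uPage;              /* direct-mapped slot */
    if (pTlb->aEntries[idx].uTag == (uPage | pTlb->uRevision))
    {
        pTlb->aEntries[idx].uTag = 0;                   /* zero can never equal a live tag */
        return 1;                                       /* entry was present and is now gone */
    }
    return 0;                                           /* page wasn't cached */
}
#endif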
1592
1593/**
1594 * Invalidates the host physical aspects of the IEM TLBs.
1595 *
1596 * This is called internally as well as by PGM when moving GC mappings.
1597 *
1598 * @param pVCpu The cross context virtual CPU structure of the calling
1599 * thread.
1600 */
1601VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1602{
1603#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1604 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1605
1606# ifdef IEM_WITH_CODE_TLB
1607 pVCpu->iem.s.cbInstrBufTotal = 0;
1608# endif
1609 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1610 if (uTlbPhysRev != 0)
1611 {
1612 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1613 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1614 }
1615 else
1616 {
1617 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1618 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1619
1620 unsigned i;
1621# ifdef IEM_WITH_CODE_TLB
1622 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1623 while (i-- > 0)
1624 {
1625 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1626 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1627 }
1628# endif
1629# ifdef IEM_WITH_DATA_TLB
1630 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1631 while (i-- > 0)
1632 {
1633 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1634 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1635 }
1636# endif
1637 }
1638#else
1639 NOREF(pVCpu);
1640#endif
1641}
1642
1643
1644/**
1645 * Invalidates the host physical aspects of the IEM TLBs of all CPUs.
1646 *
1647 * This is called internally as well as by PGM when moving GC mappings.
1648 *
1649 * @param pVM The cross context VM structure.
1650 *
1651 * @remarks Caller holds the PGM lock.
1652 */
1653VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1654{
1655 RT_NOREF_PV(pVM);
1656}
1657
1658#ifdef IEM_WITH_CODE_TLB
1659
1660/**
1661 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
1662 * failure and jumping.
1663 *
1664 * We end up here for a number of reasons:
1665 * - pbInstrBuf isn't yet initialized.
1666 * - Advancing beyond the buffer boundary (e.g. cross page).
1667 * - Advancing beyond the CS segment limit.
1668 * - Fetching from non-mappable page (e.g. MMIO).
1669 *
1670 * @param pVCpu The cross context virtual CPU structure of the
1671 * calling thread.
1672 * @param pvDst Where to return the bytes.
1673 * @param cbDst Number of bytes to read.
1674 *
1675 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1676 */
1677IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1678{
1679#ifdef IN_RING3
1680//__debugbreak();
1681 for (;;)
1682 {
1683 Assert(cbDst <= 8);
1684 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1685
1686 /*
1687 * We might have a partial buffer match, deal with that first to make the
1688 * rest simpler. This is the first part of the cross page/buffer case.
1689 */
1690 if (pVCpu->iem.s.pbInstrBuf != NULL)
1691 {
1692 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1693 {
1694 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1695 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1696 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1697
1698 cbDst -= cbCopy;
1699 pvDst = (uint8_t *)pvDst + cbCopy;
1700 offBuf += cbCopy;
1701 pVCpu->iem.s.offInstrNextByte += offBuf;
1702 }
1703 }
1704
1705 /*
1706 * Check segment limit, figuring how much we're allowed to access at this point.
1707 *
1708 * We will fault immediately if RIP is past the segment limit / in non-canonical
1709 * territory. If we do continue, there are one or more bytes to read before we
1710 * end up in trouble and we need to do that first before faulting.
1711 */
1712 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1713 RTGCPTR GCPtrFirst;
1714 uint32_t cbMaxRead;
1715 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1716 {
1717 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1718 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1719 { /* likely */ }
1720 else
1721 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1722 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1723 }
1724 else
1725 {
1726 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1727 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1728 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1729 { /* likely */ }
1730 else
1731 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1732 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1733 if (cbMaxRead != 0)
1734 { /* likely */ }
1735 else
1736 {
1737 /* Overflowed because address is 0 and limit is max. */
1738 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1739 cbMaxRead = X86_PAGE_SIZE;
1740 }
1741 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1742 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1743 if (cbMaxRead2 < cbMaxRead)
1744 cbMaxRead = cbMaxRead2;
1745 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1746 }
1747
1748 /*
1749 * Get the TLB entry for this piece of code.
1750 */
1751 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1752 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1753 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1754 if (pTlbe->uTag == uTag)
1755 {
1756 /* likely when executing lots of code, otherwise unlikely */
1757# ifdef VBOX_WITH_STATISTICS
1758 pVCpu->iem.s.CodeTlb.cTlbHits++;
1759# endif
1760 }
1761 else
1762 {
1763 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1764# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1765 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1766 {
1767 pTlbe->uTag = uTag;
1768 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1769 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1770 pTlbe->GCPhys = NIL_RTGCPHYS;
1771 pTlbe->pbMappingR3 = NULL;
1772 }
1773 else
1774# endif
1775 {
1776 RTGCPHYS GCPhys;
1777 uint64_t fFlags;
1778 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1779 if (RT_FAILURE(rc))
1780 {
1781 Log(("iemOpcodeFetchBytesJmp: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1782 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1783 }
1784
1785 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1786 pTlbe->uTag = uTag;
1787 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1788 pTlbe->GCPhys = GCPhys;
1789 pTlbe->pbMappingR3 = NULL;
1790 }
1791 }
1792
1793 /*
1794 * Check TLB page table level access flags.
1795 */
1796 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1797 {
1798 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1799 {
1800 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1801 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1802 }
1803 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1804 {
1805 Log(("iemOpcodeFetchBytesJmp: %RGv - NX\n", GCPtrFirst));
1806 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1807 }
1808 }
1809
1810# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1811 /*
1812 * Allow interpretation of patch manager code blocks since they can for
1813 * instance throw #PFs for perfectly good reasons.
1814 */
1815 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1816 { /* likely */ }
1817 else
1818 {
1819 /** @todo Could optimize this a little in ring-3 if we liked. */
1820 size_t cbRead = 0;
1821 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1822 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1823 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1824 return;
1825 }
1826# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1827
1828 /*
1829 * Look up the physical page info if necessary.
1830 */
1831 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1832 { /* not necessary */ }
1833 else
1834 {
1835 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1836 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1837 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1838 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1839 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1840 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1841 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1842 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1843 }
1844
1845# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1846 /*
1847 * Try do a direct read using the pbMappingR3 pointer.
1848 */
1849 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1850 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1851 {
1852 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1853 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1854 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1855 {
1856 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1857 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1858 }
1859 else
1860 {
1861 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1862 Assert(cbInstr < cbMaxRead);
1863 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1864 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1865 }
1866 if (cbDst <= cbMaxRead)
1867 {
1868 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1869 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1870 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1871 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1872 return;
1873 }
1874 pVCpu->iem.s.pbInstrBuf = NULL;
1875
1876 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1877 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1878 }
1879 else
1880# endif
1881#if 0
1882 /*
1883 * If there is no special read handling, we can read a bit more and
1884 * put it in the prefetch buffer.
1885 */
1886 if ( cbDst < cbMaxRead
1887 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1888 {
1889 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1890 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1891 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1892 { /* likely */ }
1893 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1894 {
1895 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1896 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1897 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1898 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1899 }
1900 else
1901 {
1902 Log((RT_SUCCESS(rcStrict)
1903 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1904 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1905 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1906 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1907 }
1908 }
1909 /*
1910 * Special read handling, so only read exactly what's needed.
1911 * This is a highly unlikely scenario.
1912 */
1913 else
1914#endif
1915 {
1916 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1917 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1918 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1919 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1920 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1921 { /* likely */ }
1922 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1923 {
1924 Log(("iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1925 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1926 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1927 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1928 }
1929 else
1930 {
1931 Log((RT_SUCCESS(rcStrict)
1932 ? "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1933 : "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1934 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1935 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1936 }
1937 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1938 if (cbToRead == cbDst)
1939 return;
1940 }
1941
1942 /*
1943 * More to read, loop.
1944 */
1945 cbDst -= cbMaxRead;
1946 pvDst = (uint8_t *)pvDst + cbMaxRead;
1947 }
1948#else
1949 RT_NOREF(pvDst, cbDst);
1950 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1951#endif
1952}
1953
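/*
 * Illustrative sketch (not part of the build) of the cross-page fetch loop
 * above, stripped of the TLB, segment-limit and access-handler details:
 * each pass reads at most up to the end of the current page, then the
 * remainder is fetched from the following page.  pfnReadPage is a
 * hypothetical callback standing in for the TLB lookup + physical read.
 */
#if 0
static void sketchFetchCrossPage(uint64_t GCPtrFirst, void *pvDst, size_t cbDst,
                                 void (*pfnReadPage)(uint64_t GCPtr, void *pvBuf, size_t cb))
{
    while (cbDst > 0)
    {
        size_t const cbLeftOnPage = 4096 - ((size_t)GCPtrFirst & 0xfff);
        size_t const cbThis       = cbDst < cbLeftOnPage ? cbDst : cbLeftOnPage;
        pfnReadPage(GCPtrFirst, pvDst, cbThis);     /* never crosses a page boundary */
        pvDst       = (uint8_t *)pvDst + cbThis;
        GCPtrFirst += cbThis;
        cbDst      -= cbThis;
    }
}
#endif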
1954#else
1955
1956/**
1957 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1958 * exception if it fails.
1959 *
1960 * @returns Strict VBox status code.
1961 * @param pVCpu The cross context virtual CPU structure of the
1962 * calling thread.
1963 * @param cbMin The minimum number of bytes relative to offOpcode
1964 * that must be read.
1965 */
1966IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1967{
1968 /*
1969 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1970 *
1971 * First translate CS:rIP to a physical address.
1972 */
1973 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1974 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1975 uint32_t cbToTryRead;
1976 RTGCPTR GCPtrNext;
1977 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1978 {
1979 cbToTryRead = PAGE_SIZE;
1980 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1981 if (!IEM_IS_CANONICAL(GCPtrNext))
1982 return iemRaiseGeneralProtectionFault0(pVCpu);
1983 }
1984 else
1985 {
1986 uint32_t GCPtrNext32 = pCtx->eip;
1987 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1988 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1989 if (GCPtrNext32 > pCtx->cs.u32Limit)
1990 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1991 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1992 if (!cbToTryRead) /* overflowed */
1993 {
1994 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1995 cbToTryRead = UINT32_MAX;
1996 /** @todo check out wrapping around the code segment. */
1997 }
1998 if (cbToTryRead < cbMin - cbLeft)
1999 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2000 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
2001 }
2002
2003 /* Only read up to the end of the page, and make sure we don't read more
2004 than the opcode buffer can hold. */
2005 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
2006 if (cbToTryRead > cbLeftOnPage)
2007 cbToTryRead = cbLeftOnPage;
2008 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
2009 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
2010/** @todo r=bird: Convert assertion into undefined opcode exception? */
2011 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
2012
2013# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2014 /* Allow interpretation of patch manager code blocks since they can for
2015 instance throw #PFs for perfectly good reasons. */
2016 if (pVCpu->iem.s.fInPatchCode)
2017 {
2018 size_t cbRead = 0;
2019 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2020 AssertRCReturn(rc, rc);
2021 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2022 return VINF_SUCCESS;
2023 }
2024# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2025
2026 RTGCPHYS GCPhys;
2027 uint64_t fFlags;
2028 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2029 if (RT_FAILURE(rc))
2030 {
2031 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2032 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2033 }
2034 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2035 {
2036 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2037 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2038 }
2039 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
2040 {
2041 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2042 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2043 }
2044 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2045 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2046 /** @todo Check reserved bits and such stuff. PGM is better at doing
2047 * that, so do it when implementing the guest virtual address
2048 * TLB... */
2049
2050 /*
2051 * Read the bytes at this address.
2052 *
2053 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2054 * and since PATM should only patch the start of an instruction there
2055 * should be no need to check again here.
2056 */
2057 if (!pVCpu->iem.s.fBypassHandlers)
2058 {
2059 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2060 cbToTryRead, PGMACCESSORIGIN_IEM);
2061 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2062 { /* likely */ }
2063 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2064 {
2065 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2066 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2067 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2068 }
2069 else
2070 {
2071 Log((RT_SUCCESS(rcStrict)
2072 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2073 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2074 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2075 return rcStrict;
2076 }
2077 }
2078 else
2079 {
2080 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2081 if (RT_SUCCESS(rc))
2082 { /* likely */ }
2083 else
2084 {
2085 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2086 return rc;
2087 }
2088 }
2089 pVCpu->iem.s.cbOpcode += cbToTryRead;
2090 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2091
2092 return VINF_SUCCESS;
2093}
2094
2095#endif /* !IEM_WITH_CODE_TLB */
2096#ifndef IEM_WITH_SETJMP
2097
2098/**
2099 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2100 *
2101 * @returns Strict VBox status code.
2102 * @param pVCpu The cross context virtual CPU structure of the
2103 * calling thread.
2104 * @param pb Where to return the opcode byte.
2105 */
2106DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2107{
2108 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2109 if (rcStrict == VINF_SUCCESS)
2110 {
2111 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2112 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2113 pVCpu->iem.s.offOpcode = offOpcode + 1;
2114 }
2115 else
2116 *pb = 0;
2117 return rcStrict;
2118}
2119
2120
2121/**
2122 * Fetches the next opcode byte.
2123 *
2124 * @returns Strict VBox status code.
2125 * @param pVCpu The cross context virtual CPU structure of the
2126 * calling thread.
2127 * @param pu8 Where to return the opcode byte.
2128 */
2129DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2130{
2131 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2132 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2133 {
2134 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2135 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2136 return VINF_SUCCESS;
2137 }
2138 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2139}
2140
2141#else /* IEM_WITH_SETJMP */
2142
2143/**
2144 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2145 *
2146 * @returns The opcode byte.
2147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2148 */
2149DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2150{
2151# ifdef IEM_WITH_CODE_TLB
2152 uint8_t u8;
2153 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2154 return u8;
2155# else
2156 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2157 if (rcStrict == VINF_SUCCESS)
2158 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2159 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2160# endif
2161}
2162
2163
2164/**
2165 * Fetches the next opcode byte, longjmp on error.
2166 *
2167 * @returns The opcode byte.
2168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2169 */
2170DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2171{
2172# ifdef IEM_WITH_CODE_TLB
2173 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2174 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2175 if (RT_LIKELY( pbBuf != NULL
2176 && offBuf < pVCpu->iem.s.cbInstrBuf))
2177 {
2178 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2179 return pbBuf[offBuf];
2180 }
2181# else
2182 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2183 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2184 {
2185 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2186 return pVCpu->iem.s.abOpcode[offOpcode];
2187 }
2188# endif
2189 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2190}
2191
2192#endif /* IEM_WITH_SETJMP */
2193
2194/**
2195 * Fetches the next opcode byte, returns automatically on failure.
2196 *
2197 * @param a_pu8 Where to return the opcode byte.
2198 * @remark Implicitly references pVCpu.
2199 */
2200#ifndef IEM_WITH_SETJMP
2201# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2202 do \
2203 { \
2204 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2205 if (rcStrict2 == VINF_SUCCESS) \
2206 { /* likely */ } \
2207 else \
2208 return rcStrict2; \
2209 } while (0)
2210#else
2211# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2212#endif /* IEM_WITH_SETJMP */
2213
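/*
 * Illustrative sketch (not part of the build) of how decoder code uses the
 * macro above: the same source compiles in both configurations because the
 * macro either bails out with the strict status code or, in the setjmp
 * build, longjmps from deep inside the fetch helpers.  The opcode handler
 * below is hypothetical, not one of the real iemOp_* decoders.
 */
#if 0
IEM_STATIC VBOXSTRICTRC sketchDecodeHypotheticalOp(PVMCPU pVCpu)
{
    uint8_t bImm;
    IEM_OPCODE_GET_NEXT_U8(&bImm);   /* returns / longjmps on fetch failure */
    /* ... decode the rest of the instruction using bImm ... */
    return VINF_SUCCESS;
}
#endif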
2214
2215#ifndef IEM_WITH_SETJMP
2216/**
2217 * Fetches the next signed byte from the opcode stream.
2218 *
2219 * @returns Strict VBox status code.
2220 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2221 * @param pi8 Where to return the signed byte.
2222 */
2223DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2224{
2225 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2226}
2227#endif /* !IEM_WITH_SETJMP */
2228
2229
2230/**
2231 * Fetches the next signed byte from the opcode stream, returning automatically
2232 * on failure.
2233 *
2234 * @param a_pi8 Where to return the signed byte.
2235 * @remark Implicitly references pVCpu.
2236 */
2237#ifndef IEM_WITH_SETJMP
2238# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2239 do \
2240 { \
2241 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2242 if (rcStrict2 != VINF_SUCCESS) \
2243 return rcStrict2; \
2244 } while (0)
2245#else /* IEM_WITH_SETJMP */
2246# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2247
2248#endif /* IEM_WITH_SETJMP */
2249
2250#ifndef IEM_WITH_SETJMP
2251
2252/**
2253 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2254 *
2255 * @returns Strict VBox status code.
2256 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2257 * @param pu16 Where to return the opcode word.
2258 */
2259DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2260{
2261 uint8_t u8;
2262 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2263 if (rcStrict == VINF_SUCCESS)
2264 *pu16 = (int8_t)u8;
2265 return rcStrict;
2266}
2267
2268
2269/**
2270 * Fetches the next signed byte from the opcode stream, extending it to
2271 * unsigned 16-bit.
2272 *
2273 * @returns Strict VBox status code.
2274 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2275 * @param pu16 Where to return the unsigned word.
2276 */
2277DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2278{
2279 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2280 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2281 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2282
2283 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2284 pVCpu->iem.s.offOpcode = offOpcode + 1;
2285 return VINF_SUCCESS;
2286}
2287
2288#endif /* !IEM_WITH_SETJMP */
2289
2290/**
2291 * Fetches the next signed byte from the opcode stream and sign-extends it to
2292 * a word, returning automatically on failure.
2293 *
2294 * @param a_pu16 Where to return the word.
2295 * @remark Implicitly references pVCpu.
2296 */
2297#ifndef IEM_WITH_SETJMP
2298# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2299 do \
2300 { \
2301 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2302 if (rcStrict2 != VINF_SUCCESS) \
2303 return rcStrict2; \
2304 } while (0)
2305#else
2306# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2307#endif
2308
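/*
 * Illustrative sketch (not part of the build) of the sign extension done
 * by the S8->U16 fetchers above: casting the opcode byte to int8_t first
 * makes values 0x80..0xFF widen with the sign bit set, which is what an
 * 8-bit displacement or immediate needs when applied at 16-bit width.
 */
#if 0
static uint16_t sketchSignExtendByteToU16(uint8_t bOpcode)
{
    return (uint16_t)(int8_t)bOpcode;   /* e.g. 0xFE -> 0xFFFE, 0x7F -> 0x007F */
}
#endif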
2309#ifndef IEM_WITH_SETJMP
2310
2311/**
2312 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2313 *
2314 * @returns Strict VBox status code.
2315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2316 * @param pu32 Where to return the opcode dword.
2317 */
2318DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2319{
2320 uint8_t u8;
2321 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2322 if (rcStrict == VINF_SUCCESS)
2323 *pu32 = (int8_t)u8;
2324 return rcStrict;
2325}
2326
2327
2328/**
2329 * Fetches the next signed byte from the opcode stream, extending it to
2330 * unsigned 32-bit.
2331 *
2332 * @returns Strict VBox status code.
2333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2334 * @param pu32 Where to return the unsigned dword.
2335 */
2336DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2337{
2338 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2339 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2340 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2341
2342 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2343 pVCpu->iem.s.offOpcode = offOpcode + 1;
2344 return VINF_SUCCESS;
2345}
2346
2347#endif /* !IEM_WITH_SETJMP */
2348
2349/**
2350 * Fetches the next signed byte from the opcode stream and sign-extends it to
2351 * a double word, returning automatically on failure.
2352 *
2353 * @param a_pu32 Where to return the double word.
2354 * @remark Implicitly references pVCpu.
2355 */
2356#ifndef IEM_WITH_SETJMP
2357# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2358 do \
2359 { \
2360 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2361 if (rcStrict2 != VINF_SUCCESS) \
2362 return rcStrict2; \
2363 } while (0)
2364#else
2365# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2366#endif
2367
2368#ifndef IEM_WITH_SETJMP
2369
2370/**
2371 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2372 *
2373 * @returns Strict VBox status code.
2374 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2375 * @param pu64 Where to return the opcode qword.
2376 */
2377DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2378{
2379 uint8_t u8;
2380 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2381 if (rcStrict == VINF_SUCCESS)
2382 *pu64 = (int8_t)u8;
2383 return rcStrict;
2384}
2385
2386
2387/**
2388 * Fetches the next signed byte from the opcode stream, extending it to
2389 * unsigned 64-bit.
2390 *
2391 * @returns Strict VBox status code.
2392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2393 * @param pu64 Where to return the unsigned qword.
2394 */
2395DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2396{
2397 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2398 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2399 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2400
2401 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2402 pVCpu->iem.s.offOpcode = offOpcode + 1;
2403 return VINF_SUCCESS;
2404}
2405
2406#endif /* !IEM_WITH_SETJMP */
2407
2408
2409/**
2410 * Fetches the next signed byte from the opcode stream and sign-extends it to
2411 * a quad word, returning automatically on failure.
2412 *
2413 * @param a_pu64 Where to return the quad word.
2414 * @remark Implicitly references pVCpu.
2415 */
2416#ifndef IEM_WITH_SETJMP
2417# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2418 do \
2419 { \
2420 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2421 if (rcStrict2 != VINF_SUCCESS) \
2422 return rcStrict2; \
2423 } while (0)
2424#else
2425# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2426#endif
2427
2428
2429#ifndef IEM_WITH_SETJMP
2430
2431/**
2432 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2433 *
2434 * @returns Strict VBox status code.
2435 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2436 * @param pu16 Where to return the opcode word.
2437 */
2438DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2439{
2440 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2441 if (rcStrict == VINF_SUCCESS)
2442 {
2443 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2444# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2445 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2446# else
2447 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2448# endif
2449 pVCpu->iem.s.offOpcode = offOpcode + 2;
2450 }
2451 else
2452 *pu16 = 0;
2453 return rcStrict;
2454}
2455
2456
2457/**
2458 * Fetches the next opcode word.
2459 *
2460 * @returns Strict VBox status code.
2461 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2462 * @param pu16 Where to return the opcode word.
2463 */
2464DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2465{
2466 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2467 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2468 {
2469 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2470# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2471 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2472# else
2473 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2474# endif
2475 return VINF_SUCCESS;
2476 }
2477 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2478}
2479
2480#else /* IEM_WITH_SETJMP */
2481
2482/**
2483 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
2484 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2485 * @returns The opcode word.
2486 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2487 */
2488DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2489{
2490# ifdef IEM_WITH_CODE_TLB
2491 uint16_t u16;
2492 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2493 return u16;
2494# else
2495 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2496 if (rcStrict == VINF_SUCCESS)
2497 {
2498 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2499 pVCpu->iem.s.offOpcode += 2;
2500# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2501 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2502# else
2503 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2504# endif
2505 }
2506 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2507# endif
2508}
2509
2510
2511/**
2512 * Fetches the next opcode word, longjmp on error.
2513 *
2514 * @returns The opcode word.
2515 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2516 */
2517DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2518{
2519# ifdef IEM_WITH_CODE_TLB
2520 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2521 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2522 if (RT_LIKELY( pbBuf != NULL
2523 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2524 {
2525 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2526# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2527 return *(uint16_t const *)&pbBuf[offBuf];
2528# else
2529 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2530# endif
2531 }
2532# else
2533 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2534 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2535 {
2536 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2537# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2538 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2539# else
2540 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2541# endif
2542 }
2543# endif
2544 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2545}
2546
2547#endif /* IEM_WITH_SETJMP */
2548
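/*
 * Illustrative sketch (not part of the build) of what the two variants of
 * the word fetchers above compute: opcode bytes are little endian, so the
 * byte at the lower offset becomes the low half of the result, which is
 * also what the unaligned 16-bit load produces on a little-endian host.
 */
#if 0
static uint16_t sketchReadOpcodeU16(uint8_t const *pbOpcode)
{
    return (uint16_t)(pbOpcode[0] | ((uint16_t)pbOpcode[1] << 8));
}
#endif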
2549
2550/**
2551 * Fetches the next opcode word, returns automatically on failure.
2552 *
2553 * @param a_pu16 Where to return the opcode word.
2554 * @remark Implicitly references pVCpu.
2555 */
2556#ifndef IEM_WITH_SETJMP
2557# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2558 do \
2559 { \
2560 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2561 if (rcStrict2 != VINF_SUCCESS) \
2562 return rcStrict2; \
2563 } while (0)
2564#else
2565# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2566#endif
2567
2568#ifndef IEM_WITH_SETJMP
2569
2570/**
2571 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2572 *
2573 * @returns Strict VBox status code.
2574 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2575 * @param pu32 Where to return the opcode double word.
2576 */
2577DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2578{
2579 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2580 if (rcStrict == VINF_SUCCESS)
2581 {
2582 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2583 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2584 pVCpu->iem.s.offOpcode = offOpcode + 2;
2585 }
2586 else
2587 *pu32 = 0;
2588 return rcStrict;
2589}
2590
2591
2592/**
2593 * Fetches the next opcode word, zero extending it to a double word.
2594 *
2595 * @returns Strict VBox status code.
2596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2597 * @param pu32 Where to return the opcode double word.
2598 */
2599DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2600{
2601 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2602 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2603 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2604
2605 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2606 pVCpu->iem.s.offOpcode = offOpcode + 2;
2607 return VINF_SUCCESS;
2608}
2609
2610#endif /* !IEM_WITH_SETJMP */
2611
2612
2613/**
2614 * Fetches the next opcode word and zero extends it to a double word, returns
2615 * automatically on failure.
2616 *
2617 * @param a_pu32 Where to return the opcode double word.
2618 * @remark Implicitly references pVCpu.
2619 */
2620#ifndef IEM_WITH_SETJMP
2621# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2622 do \
2623 { \
2624 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2625 if (rcStrict2 != VINF_SUCCESS) \
2626 return rcStrict2; \
2627 } while (0)
2628#else
2629# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2630#endif
2631
2632#ifndef IEM_WITH_SETJMP
2633
2634/**
2635 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2636 *
2637 * @returns Strict VBox status code.
2638 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2639 * @param pu64 Where to return the opcode quad word.
2640 */
2641DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2642{
2643 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2644 if (rcStrict == VINF_SUCCESS)
2645 {
2646 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2647 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2648 pVCpu->iem.s.offOpcode = offOpcode + 2;
2649 }
2650 else
2651 *pu64 = 0;
2652 return rcStrict;
2653}
2654
2655
2656/**
2657 * Fetches the next opcode word, zero extending it to a quad word.
2658 *
2659 * @returns Strict VBox status code.
2660 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2661 * @param pu64 Where to return the opcode quad word.
2662 */
2663DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2664{
2665 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2666 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2667 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2668
2669 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2670 pVCpu->iem.s.offOpcode = offOpcode + 2;
2671 return VINF_SUCCESS;
2672}
2673
2674#endif /* !IEM_WITH_SETJMP */
2675
2676/**
2677 * Fetches the next opcode word and zero extends it to a quad word, returns
2678 * automatically on failure.
2679 *
2680 * @param a_pu64 Where to return the opcode quad word.
2681 * @remark Implicitly references pVCpu.
2682 */
2683#ifndef IEM_WITH_SETJMP
2684# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2685 do \
2686 { \
2687 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2688 if (rcStrict2 != VINF_SUCCESS) \
2689 return rcStrict2; \
2690 } while (0)
2691#else
2692# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2693#endif
2694
2695
2696#ifndef IEM_WITH_SETJMP
2697/**
2698 * Fetches the next signed word from the opcode stream.
2699 *
2700 * @returns Strict VBox status code.
2701 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2702 * @param pi16 Where to return the signed word.
2703 */
2704DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2705{
2706 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2707}
2708#endif /* !IEM_WITH_SETJMP */
2709
2710
2711/**
2712 * Fetches the next signed word from the opcode stream, returning automatically
2713 * on failure.
2714 *
2715 * @param a_pi16 Where to return the signed word.
2716 * @remark Implicitly references pVCpu.
2717 */
2718#ifndef IEM_WITH_SETJMP
2719# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2720 do \
2721 { \
2722 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2723 if (rcStrict2 != VINF_SUCCESS) \
2724 return rcStrict2; \
2725 } while (0)
2726#else
2727# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2728#endif
2729
2730#ifndef IEM_WITH_SETJMP
2731
2732/**
2733 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2734 *
2735 * @returns Strict VBox status code.
2736 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2737 * @param pu32 Where to return the opcode dword.
2738 */
2739DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2740{
2741 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2742 if (rcStrict == VINF_SUCCESS)
2743 {
2744 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2745# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2746 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2747# else
2748 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2749 pVCpu->iem.s.abOpcode[offOpcode + 1],
2750 pVCpu->iem.s.abOpcode[offOpcode + 2],
2751 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2752# endif
2753 pVCpu->iem.s.offOpcode = offOpcode + 4;
2754 }
2755 else
2756 *pu32 = 0;
2757 return rcStrict;
2758}
2759
2760
2761/**
2762 * Fetches the next opcode dword.
2763 *
2764 * @returns Strict VBox status code.
2765 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2766 * @param pu32 Where to return the opcode double word.
2767 */
2768DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2769{
2770 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2771 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2772 {
2773 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2774# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2775 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2776# else
2777 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2778 pVCpu->iem.s.abOpcode[offOpcode + 1],
2779 pVCpu->iem.s.abOpcode[offOpcode + 2],
2780 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2781# endif
2782 return VINF_SUCCESS;
2783 }
2784 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2785}
2786
2787#else /* IEM_WITH_SETJMP */
2788
2789/**
2790 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2791 *
2792 * @returns The opcode dword.
2793 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2794 */
2795DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2796{
2797# ifdef IEM_WITH_CODE_TLB
2798 uint32_t u32;
2799 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2800 return u32;
2801# else
2802 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2803 if (rcStrict == VINF_SUCCESS)
2804 {
2805 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2806 pVCpu->iem.s.offOpcode = offOpcode + 4;
2807# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2808 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2809# else
2810 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2811 pVCpu->iem.s.abOpcode[offOpcode + 1],
2812 pVCpu->iem.s.abOpcode[offOpcode + 2],
2813 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2814# endif
2815 }
2816 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2817# endif
2818}
2819
2820
2821/**
2822 * Fetches the next opcode dword, longjmp on error.
2823 *
2824 * @returns The opcode dword.
2825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2826 */
2827DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2828{
2829# ifdef IEM_WITH_CODE_TLB
2830 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2831 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2832 if (RT_LIKELY( pbBuf != NULL
2833 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2834 {
2835 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2836# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2837 return *(uint32_t const *)&pbBuf[offBuf];
2838# else
2839 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2840 pbBuf[offBuf + 1],
2841 pbBuf[offBuf + 2],
2842 pbBuf[offBuf + 3]);
2843# endif
2844 }
2845# else
2846 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2847 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2848 {
2849 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2850# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2851 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2852# else
2853 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2854 pVCpu->iem.s.abOpcode[offOpcode + 1],
2855 pVCpu->iem.s.abOpcode[offOpcode + 2],
2856 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2857# endif
2858 }
2859# endif
2860 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2861}
2862
2863#endif /* IEM_WITH_SETJMP */
2864
2865
2866/**
2867 * Fetches the next opcode dword, returns automatically on failure.
2868 *
2869 * @param a_pu32 Where to return the opcode dword.
2870 * @remark Implicitly references pVCpu.
2871 */
2872#ifndef IEM_WITH_SETJMP
2873# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2874 do \
2875 { \
2876 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2877 if (rcStrict2 != VINF_SUCCESS) \
2878 return rcStrict2; \
2879 } while (0)
2880#else
2881# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2882#endif
2883
2884#ifndef IEM_WITH_SETJMP
2885
2886/**
2887 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2888 *
2889 * @returns Strict VBox status code.
2890 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2891 * @param pu64 Where to return the opcode dword.
2892 */
2893DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2894{
2895 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2896 if (rcStrict == VINF_SUCCESS)
2897 {
2898 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2899 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2900 pVCpu->iem.s.abOpcode[offOpcode + 1],
2901 pVCpu->iem.s.abOpcode[offOpcode + 2],
2902 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2903 pVCpu->iem.s.offOpcode = offOpcode + 4;
2904 }
2905 else
2906 *pu64 = 0;
2907 return rcStrict;
2908}
2909
2910
2911/**
2912 * Fetches the next opcode dword, zero extending it to a quad word.
2913 *
2914 * @returns Strict VBox status code.
2915 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2916 * @param pu64 Where to return the opcode quad word.
2917 */
2918DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2919{
2920 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2921 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2922 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2923
2924 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2925 pVCpu->iem.s.abOpcode[offOpcode + 1],
2926 pVCpu->iem.s.abOpcode[offOpcode + 2],
2927 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2928 pVCpu->iem.s.offOpcode = offOpcode + 4;
2929 return VINF_SUCCESS;
2930}
2931
2932#endif /* !IEM_WITH_SETJMP */
2933
2934
2935/**
2936 * Fetches the next opcode dword and zero extends it to a quad word, returns
2937 * automatically on failure.
2938 *
2939 * @param a_pu64 Where to return the opcode quad word.
2940 * @remark Implicitly references pVCpu.
2941 */
2942#ifndef IEM_WITH_SETJMP
2943# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2944 do \
2945 { \
2946 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2947 if (rcStrict2 != VINF_SUCCESS) \
2948 return rcStrict2; \
2949 } while (0)
2950#else
2951# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2952#endif
2953
2954
2955#ifndef IEM_WITH_SETJMP
2956/**
2957 * Fetches the next signed double word from the opcode stream.
2958 *
2959 * @returns Strict VBox status code.
2960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2961 * @param pi32 Where to return the signed double word.
2962 */
2963DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2964{
2965 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2966}
2967#endif
2968
2969/**
2970 * Fetches the next signed double word from the opcode stream, returning
2971 * automatically on failure.
2972 *
2973 * @param a_pi32 Where to return the signed double word.
2974 * @remark Implicitly references pVCpu.
2975 */
2976#ifndef IEM_WITH_SETJMP
2977# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2978 do \
2979 { \
2980 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2981 if (rcStrict2 != VINF_SUCCESS) \
2982 return rcStrict2; \
2983 } while (0)
2984#else
2985# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2986#endif
2987
2988#ifndef IEM_WITH_SETJMP
2989
2990/**
2991 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2992 *
2993 * @returns Strict VBox status code.
2994 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2995 * @param pu64 Where to return the opcode qword.
2996 */
2997DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2998{
2999 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3000 if (rcStrict == VINF_SUCCESS)
3001 {
3002 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3003 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3004 pVCpu->iem.s.abOpcode[offOpcode + 1],
3005 pVCpu->iem.s.abOpcode[offOpcode + 2],
3006 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3007 pVCpu->iem.s.offOpcode = offOpcode + 4;
3008 }
3009 else
3010 *pu64 = 0;
3011 return rcStrict;
3012}
3013
3014
3015/**
3016 * Fetches the next opcode dword, sign extending it into a quad word.
3017 *
3018 * @returns Strict VBox status code.
3019 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3020 * @param pu64 Where to return the opcode quad word.
3021 */
3022DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3023{
3024 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3025 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3026 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3027
3028 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3029 pVCpu->iem.s.abOpcode[offOpcode + 1],
3030 pVCpu->iem.s.abOpcode[offOpcode + 2],
3031 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3032 *pu64 = i32;
3033 pVCpu->iem.s.offOpcode = offOpcode + 4;
3034 return VINF_SUCCESS;
3035}
3036
3037#endif /* !IEM_WITH_SETJMP */
3038
3039
3040/**
3041 * Fetches the next opcode double word and sign extends it to a quad word,
3042 * returns automatically on failure.
3043 *
3044 * @param a_pu64 Where to return the opcode quad word.
3045 * @remark Implicitly references pVCpu.
3046 */
3047#ifndef IEM_WITH_SETJMP
3048# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3049 do \
3050 { \
3051 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3052 if (rcStrict2 != VINF_SUCCESS) \
3053 return rcStrict2; \
3054 } while (0)
3055#else
3056# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3057#endif
3058
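/*
 * Illustrative sketch (not part of the build) of the S32->U64 extension
 * above: 32-bit displacements and most 32-bit immediates are sign extended
 * to 64 bits in long mode, so the dword is first interpreted as int32_t
 * and only then widened.
 */
#if 0
static uint64_t sketchSignExtendDwordToU64(uint32_t uDword)
{
    return (uint64_t)(int64_t)(int32_t)uDword;   /* e.g. 0xFFFFFFF0 -> 0xFFFFFFFFFFFFFFF0 */
}
#endif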
3059#ifndef IEM_WITH_SETJMP
3060
3061/**
3062 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3063 *
3064 * @returns Strict VBox status code.
3065 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3066 * @param pu64 Where to return the opcode qword.
3067 */
3068DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3069{
3070 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3071 if (rcStrict == VINF_SUCCESS)
3072 {
3073 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3074# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3075 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3076# else
3077 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3078 pVCpu->iem.s.abOpcode[offOpcode + 1],
3079 pVCpu->iem.s.abOpcode[offOpcode + 2],
3080 pVCpu->iem.s.abOpcode[offOpcode + 3],
3081 pVCpu->iem.s.abOpcode[offOpcode + 4],
3082 pVCpu->iem.s.abOpcode[offOpcode + 5],
3083 pVCpu->iem.s.abOpcode[offOpcode + 6],
3084 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3085# endif
3086 pVCpu->iem.s.offOpcode = offOpcode + 8;
3087 }
3088 else
3089 *pu64 = 0;
3090 return rcStrict;
3091}
3092
3093
3094/**
3095 * Fetches the next opcode qword.
3096 *
3097 * @returns Strict VBox status code.
3098 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3099 * @param pu64 Where to return the opcode qword.
3100 */
3101DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3102{
3103 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3104 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3105 {
3106# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3107 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3108# else
3109 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3110 pVCpu->iem.s.abOpcode[offOpcode + 1],
3111 pVCpu->iem.s.abOpcode[offOpcode + 2],
3112 pVCpu->iem.s.abOpcode[offOpcode + 3],
3113 pVCpu->iem.s.abOpcode[offOpcode + 4],
3114 pVCpu->iem.s.abOpcode[offOpcode + 5],
3115 pVCpu->iem.s.abOpcode[offOpcode + 6],
3116 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3117# endif
3118 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3119 return VINF_SUCCESS;
3120 }
3121 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3122}
3123
3124#else /* IEM_WITH_SETJMP */
3125
3126/**
3127 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3128 *
3129 * @returns The opcode qword.
3130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3131 */
3132DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3133{
3134# ifdef IEM_WITH_CODE_TLB
3135 uint64_t u64;
3136 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3137 return u64;
3138# else
3139 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3140 if (rcStrict == VINF_SUCCESS)
3141 {
3142 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3143 pVCpu->iem.s.offOpcode = offOpcode + 8;
3144# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3145 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3146# else
3147 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3148 pVCpu->iem.s.abOpcode[offOpcode + 1],
3149 pVCpu->iem.s.abOpcode[offOpcode + 2],
3150 pVCpu->iem.s.abOpcode[offOpcode + 3],
3151 pVCpu->iem.s.abOpcode[offOpcode + 4],
3152 pVCpu->iem.s.abOpcode[offOpcode + 5],
3153 pVCpu->iem.s.abOpcode[offOpcode + 6],
3154 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3155# endif
3156 }
3157 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3158# endif
3159}
3160
3161
3162/**
3163 * Fetches the next opcode qword, longjmp on error.
3164 *
3165 * @returns The opcode qword.
3166 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3167 */
3168DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3169{
3170# ifdef IEM_WITH_CODE_TLB
3171 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3172 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3173 if (RT_LIKELY( pbBuf != NULL
3174 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3175 {
3176 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3177# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3178 return *(uint64_t const *)&pbBuf[offBuf];
3179# else
3180 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3181 pbBuf[offBuf + 1],
3182 pbBuf[offBuf + 2],
3183 pbBuf[offBuf + 3],
3184 pbBuf[offBuf + 4],
3185 pbBuf[offBuf + 5],
3186 pbBuf[offBuf + 6],
3187 pbBuf[offBuf + 7]);
3188# endif
3189 }
3190# else
3191 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3192 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3193 {
3194 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3195# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3196 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3197# else
3198 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3199 pVCpu->iem.s.abOpcode[offOpcode + 1],
3200 pVCpu->iem.s.abOpcode[offOpcode + 2],
3201 pVCpu->iem.s.abOpcode[offOpcode + 3],
3202 pVCpu->iem.s.abOpcode[offOpcode + 4],
3203 pVCpu->iem.s.abOpcode[offOpcode + 5],
3204 pVCpu->iem.s.abOpcode[offOpcode + 6],
3205 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3206# endif
3207 }
3208# endif
3209 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3210}
3211
3212#endif /* IEM_WITH_SETJMP */
3213
3214/**
3215 * Fetches the next opcode quad word, returns automatically on failure.
3216 *
3217 * @param a_pu64 Where to return the opcode quad word.
3218 * @remark Implicitly references pVCpu.
3219 */
3220#ifndef IEM_WITH_SETJMP
3221# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3222 do \
3223 { \
3224 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3225 if (rcStrict2 != VINF_SUCCESS) \
3226 return rcStrict2; \
3227 } while (0)
3228#else
3229# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3230#endif
3231
3232
3233/** @name Misc Worker Functions.
3234 * @{
3235 */
3236
3237/**
3238 * Gets the exception class for the specified exception vector.
3239 *
3240 * @returns The class of the specified exception.
3241 * @param uVector The exception vector.
3242 */
3243IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3244{
3245 Assert(uVector <= X86_XCPT_LAST);
3246 switch (uVector)
3247 {
3248 case X86_XCPT_DE:
3249 case X86_XCPT_TS:
3250 case X86_XCPT_NP:
3251 case X86_XCPT_SS:
3252 case X86_XCPT_GP:
3253 case X86_XCPT_SX: /* AMD only */
3254 return IEMXCPTCLASS_CONTRIBUTORY;
3255
3256 case X86_XCPT_PF:
3257 case X86_XCPT_VE: /* Intel only */
3258 return IEMXCPTCLASS_PAGE_FAULT;
3259
3260 case X86_XCPT_DF:
3261 return IEMXCPTCLASS_DOUBLE_FAULT;
3262 }
3263 return IEMXCPTCLASS_BENIGN;
3264}
3265
3266
3267/**
3268 * Evaluates how to handle an exception caused during delivery of another event
3269 * (exception / interrupt).
3270 *
3271 * @returns How to handle the recursive exception.
3272 * @param pVCpu The cross context virtual CPU structure of the
3273 * calling thread.
3274 * @param fPrevFlags The flags of the previous event.
3275 * @param uPrevVector The vector of the previous event.
3276 * @param fCurFlags The flags of the current exception.
3277 * @param uCurVector The vector of the current exception.
3278 * @param pfXcptRaiseInfo Where to store additional information about the
3279 * exception condition. Optional.
3280 */
3281VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3282 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3283{
3284 /*
3285 * Only CPU exceptions can be raised while delivering other events; software interrupt
3286 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3287 */
3288 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3289 Assert(pVCpu); RT_NOREF(pVCpu);
3290 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3291
3292 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3293 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3294 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3295 {
3296 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3297 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3298 {
3299 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3300 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3301 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3302 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3303 {
3304 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3305 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3306 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3307 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3308 uCurVector, IEM_GET_CTX(pVCpu)->cr2));
3309 }
3310 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3311 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3312 {
3313 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3314 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3315 }
3316 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3317 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3318 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3319 {
3320 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3321 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3322 }
3323 }
3324 else
3325 {
3326 if (uPrevVector == X86_XCPT_NMI)
3327 {
3328 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3329 if (uCurVector == X86_XCPT_PF)
3330 {
3331 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3332 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3333 }
3334 }
3335 else if ( uPrevVector == X86_XCPT_AC
3336 && uCurVector == X86_XCPT_AC)
3337 {
3338 enmRaise = IEMXCPTRAISE_CPU_HANG;
3339 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3340 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3341 }
3342 }
3343 }
3344 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3345 {
3346 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3347 if (uCurVector == X86_XCPT_PF)
3348 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3349 }
3350 else
3351 {
3352 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3353 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3354 }
3355
3356 if (pfXcptRaiseInfo)
3357 *pfXcptRaiseInfo = fRaiseInfo;
3358 return enmRaise;
3359}
3360
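/*
 * Illustrative sketch only (not compiled): a #GP raised while delivering a #PF
 * is a page-fault + contributory combination and therefore folds into a double
 * fault.  The flag combinations mirror how the exception paths in this file
 * build them; the surrounding caller is assumed.
 *
 *     IEMXCPTRAISEINFO fRaiseInfo;
 *     IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
 *                                                      X86_XCPT_PF,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
 *                                                      X86_XCPT_GP,
 *                                                      &fRaiseInfo);
 *     Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT);
 *     Assert(fRaiseInfo == IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT);
 */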
3361
3362/**
3363 * Enters the CPU shutdown state initiated by a triple fault or other
3364 * unrecoverable conditions.
3365 *
3366 * @returns Strict VBox status code.
3367 * @param pVCpu The cross context virtual CPU structure of the
3368 * calling thread.
3369 */
3370IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3371{
3372 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3373 {
3374 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3375 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3376 }
3377
3378 RT_NOREF(pVCpu);
3379 return VINF_EM_TRIPLE_FAULT;
3380}
3381
3382
3383/**
3384 * Validates a new SS segment.
3385 *
3386 * @returns VBox strict status code.
3387 * @param pVCpu The cross context virtual CPU structure of the
3388 * calling thread.
3389 * @param pCtx The CPU context.
3390 * @param NewSS The new SS selector.
3391 * @param uCpl The CPL to load the stack for.
3392 * @param pDesc Where to return the descriptor.
3393 */
3394IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3395{
3396 NOREF(pCtx);
3397
3398 /* Null selectors are not allowed (we're not called for dispatching
3399 interrupts with SS=0 in long mode). */
3400 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3401 {
3402 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3403 return iemRaiseTaskSwitchFault0(pVCpu);
3404 }
3405
3406 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3407 if ((NewSS & X86_SEL_RPL) != uCpl)
3408 {
3409 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3410 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3411 }
3412
3413 /*
3414 * Read the descriptor.
3415 */
3416 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3417 if (rcStrict != VINF_SUCCESS)
3418 return rcStrict;
3419
3420 /*
3421 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3422 */
3423 if (!pDesc->Legacy.Gen.u1DescType)
3424 {
3425 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3426 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3427 }
3428
3429 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3430 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3431 {
3432 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3433 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3434 }
3435 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3436 {
3437 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3438 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3439 }
3440
3441 /* Is it there? */
3442 /** @todo testcase: Is this checked before the canonical / limit check below? */
3443 if (!pDesc->Legacy.Gen.u1Present)
3444 {
3445 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3446 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3447 }
3448
3449 return VINF_SUCCESS;
3450}
3451
3452
3453/**
3454 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3455 * not.
3456 *
3457 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3458 * @param a_pCtx The CPU context.
3459 */
3460#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3461# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3462 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3463 ? (a_pCtx)->eflags.u \
3464 : CPUMRawGetEFlags(a_pVCpu) )
3465#else
3466# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3467 ( (a_pCtx)->eflags.u )
3468#endif
3469
3470/**
3471 * Updates the EFLAGS in the correct manner wrt. PATM.
3472 *
3473 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3474 * @param a_pCtx The CPU context.
3475 * @param a_fEfl The new EFLAGS.
3476 */
3477#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3478# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3479 do { \
3480 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3481 (a_pCtx)->eflags.u = (a_fEfl); \
3482 else \
3483 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3484 } while (0)
3485#else
3486# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3487 do { \
3488 (a_pCtx)->eflags.u = (a_fEfl); \
3489 } while (0)
3490#endif
3491
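/*
 * Illustrative sketch only (not compiled): the typical read-modify-write of the
 * guest EFLAGS through the PATM-aware accessors, as done by the exception
 * dispatch code further down (e.g. clearing IF/TF/AC on interrupt delivery).
 *
 *     uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
 *     fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
 *     IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
 */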
3492
3493/** @} */
3494
3495/** @name Raising Exceptions.
3496 *
3497 * @{
3498 */
3499
3500
3501/**
3502 * Loads the specified stack far pointer from the TSS.
3503 *
3504 * @returns VBox strict status code.
3505 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3506 * @param pCtx The CPU context.
3507 * @param uCpl The CPL to load the stack for.
3508 * @param pSelSS Where to return the new stack segment.
3509 * @param puEsp Where to return the new stack pointer.
3510 */
3511IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3512 PRTSEL pSelSS, uint32_t *puEsp)
3513{
3514 VBOXSTRICTRC rcStrict;
3515 Assert(uCpl < 4);
3516
3517 switch (pCtx->tr.Attr.n.u4Type)
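 /* Dynamic stack-pointer fields in the TSS: the 16-bit TSS stores {spN, ssN} word
    pairs starting at offset 2 (hence off = uCpl * 4 + 2), the 32-bit TSS stores
    {espN, ssN} dword pairs starting at offset 4 (hence off = uCpl * 8 + 4).
    E.g. uCpl=1: 16-bit -> offset 6 (sp1/ss1), 32-bit -> offset 12 (esp1/ss1). */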
3518 {
3519 /*
3520 * 16-bit TSS (X86TSS16).
3521 */
3522 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3523 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3524 {
3525 uint32_t off = uCpl * 4 + 2;
3526 if (off + 4 <= pCtx->tr.u32Limit)
3527 {
3528 /** @todo check actual access pattern here. */
3529 uint32_t u32Tmp = 0; /* gcc maybe... */
3530 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3531 if (rcStrict == VINF_SUCCESS)
3532 {
3533 *puEsp = RT_LOWORD(u32Tmp);
3534 *pSelSS = RT_HIWORD(u32Tmp);
3535 return VINF_SUCCESS;
3536 }
3537 }
3538 else
3539 {
3540 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3541 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3542 }
3543 break;
3544 }
3545
3546 /*
3547 * 32-bit TSS (X86TSS32).
3548 */
3549 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3550 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3551 {
3552 uint32_t off = uCpl * 8 + 4;
3553 if (off + 7 <= pCtx->tr.u32Limit)
3554 {
3555 /** @todo check actual access pattern here. */
3556 uint64_t u64Tmp;
3557 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3558 if (rcStrict == VINF_SUCCESS)
3559 {
3560 *puEsp = u64Tmp & UINT32_MAX;
3561 *pSelSS = (RTSEL)(u64Tmp >> 32);
3562 return VINF_SUCCESS;
3563 }
3564 }
3565 else
3566 {
3567 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3568 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3569 }
3570 break;
3571 }
3572
3573 default:
3574 AssertFailed();
3575 rcStrict = VERR_IEM_IPE_4;
3576 break;
3577 }
3578
3579 *puEsp = 0; /* make gcc happy */
3580 *pSelSS = 0; /* make gcc happy */
3581 return rcStrict;
3582}
3583
3584
3585/**
3586 * Loads the specified stack pointer from the 64-bit TSS.
3587 *
3588 * @returns VBox strict status code.
3589 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3590 * @param pCtx The CPU context.
3591 * @param uCpl The CPL to load the stack for.
3592 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3593 * @param puRsp Where to return the new stack pointer.
3594 */
3595IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3596{
3597 Assert(uCpl < 4);
3598 Assert(uIst < 8);
3599 *puRsp = 0; /* make gcc happy */
3600
3601 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3602
3603 uint32_t off;
3604 if (uIst)
3605 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3606 else
3607 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3608 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3609 {
3610 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3611 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3612 }
3613
3614 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3615}
3616
3617
3618/**
3619 * Adjusts the CPU state according to the exception being raised.
3620 *
3621 * @param pCtx The CPU context.
3622 * @param u8Vector The exception that has been raised.
3623 */
3624DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3625{
3626 switch (u8Vector)
3627 {
3628 case X86_XCPT_DB:
3629 pCtx->dr[7] &= ~X86_DR7_GD;
3630 break;
3631 /** @todo Read the AMD and Intel exception reference... */
3632 }
3633}
3634
3635
3636/**
3637 * Implements exceptions and interrupts for real mode.
3638 *
3639 * @returns VBox strict status code.
3640 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3641 * @param pCtx The CPU context.
3642 * @param cbInstr The number of bytes to offset rIP by in the return
3643 * address.
3644 * @param u8Vector The interrupt / exception vector number.
3645 * @param fFlags The flags.
3646 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3647 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3648 */
3649IEM_STATIC VBOXSTRICTRC
3650iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3651 PCPUMCTX pCtx,
3652 uint8_t cbInstr,
3653 uint8_t u8Vector,
3654 uint32_t fFlags,
3655 uint16_t uErr,
3656 uint64_t uCr2)
3657{
3658 NOREF(uErr); NOREF(uCr2);
3659
3660 /*
3661 * Read the IDT entry.
3662 */
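 /* In real mode the IDT is the classic IVT: 4 bytes per vector, low word =
    offset, high word = segment (e.g. vector 0x21 lives at idtr.pIdt + 0x84). */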
3663 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3664 {
3665 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3666 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3667 }
3668 RTFAR16 Idte;
3669 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3670 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3671 return rcStrict;
3672
3673 /*
3674 * Push the stack frame.
3675 */
3676 uint16_t *pu16Frame;
3677 uint64_t uNewRsp;
3678 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3679 if (rcStrict != VINF_SUCCESS)
3680 return rcStrict;
3681
3682 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3683#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3684 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3685 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3686 fEfl |= UINT16_C(0xf000);
3687#endif
3688 pu16Frame[2] = (uint16_t)fEfl;
3689 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3690 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3691 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3692 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3693 return rcStrict;
3694
3695 /*
3696 * Load the vector address into cs:ip and make exception specific state
3697 * adjustments.
3698 */
3699 pCtx->cs.Sel = Idte.sel;
3700 pCtx->cs.ValidSel = Idte.sel;
3701 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3702 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3703 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3704 pCtx->rip = Idte.off;
3705 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3706 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3707
3708 /** @todo do we actually do this in real mode? */
3709 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3710 iemRaiseXcptAdjustState(pCtx, u8Vector);
3711
3712 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3713}
3714
3715
3716/**
3717 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3718 *
3719 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3720 * @param pSReg Pointer to the segment register.
3721 */
3722IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3723{
3724 pSReg->Sel = 0;
3725 pSReg->ValidSel = 0;
3726 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3727 {
3728 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes */
3729 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3730 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3731 }
3732 else
3733 {
3734 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3735 /** @todo check this on AMD-V */
3736 pSReg->u64Base = 0;
3737 pSReg->u32Limit = 0;
3738 }
3739}
3740
3741
3742/**
3743 * Loads a segment selector during a task switch in V8086 mode.
3744 *
3745 * @param pSReg Pointer to the segment register.
3746 * @param uSel The selector value to load.
3747 */
3748IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3749{
3750 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3751 pSReg->Sel = uSel;
3752 pSReg->ValidSel = uSel;
3753 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3754 pSReg->u64Base = uSel << 4;
3755 pSReg->u32Limit = 0xffff;
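 /* 0xf3 = present, DPL=3, code/data (S=1), type 3: read/write data, accessed. */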
3756 pSReg->Attr.u = 0xf3;
3757}
3758
3759
3760/**
3761 * Loads a NULL data selector into a selector register, both the hidden and
3762 * visible parts, in protected mode.
3763 *
3764 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3765 * @param pSReg Pointer to the segment register.
3766 * @param uRpl The RPL.
3767 */
3768IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3769{
3770 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3771 * data selector in protected mode. */
3772 pSReg->Sel = uRpl;
3773 pSReg->ValidSel = uRpl;
3774 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3775 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3776 {
3777 /* VT-x (Intel 3960x) observed doing something like this. */
3778 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3779 pSReg->u32Limit = UINT32_MAX;
3780 pSReg->u64Base = 0;
3781 }
3782 else
3783 {
3784 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3785 pSReg->u32Limit = 0;
3786 pSReg->u64Base = 0;
3787 }
3788}
3789
3790
3791/**
3792 * Loads a segment selector during a task switch in protected mode.
3793 *
3794 * In this task switch scenario, we would throw \#TS exceptions rather than
3795 * \#GPs.
3796 *
3797 * @returns VBox strict status code.
3798 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3799 * @param pSReg Pointer to the segment register.
3800 * @param uSel The new selector value.
3801 *
3802 * @remarks This does _not_ handle CS or SS.
3803 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3804 */
3805IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3806{
3807 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3808
3809 /* Null data selector. */
3810 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3811 {
3812 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3813 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3814 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3815 return VINF_SUCCESS;
3816 }
3817
3818 /* Fetch the descriptor. */
3819 IEMSELDESC Desc;
3820 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3821 if (rcStrict != VINF_SUCCESS)
3822 {
3823 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3824 VBOXSTRICTRC_VAL(rcStrict)));
3825 return rcStrict;
3826 }
3827
3828 /* Must be a data segment or readable code segment. */
3829 if ( !Desc.Legacy.Gen.u1DescType
3830 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3831 {
3832 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3833 Desc.Legacy.Gen.u4Type));
3834 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3835 }
3836
3837 /* Check privileges for data segments and non-conforming code segments. */
3838 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3839 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3840 {
3841 /* The RPL and the new CPL must be less than or equal to the DPL. */
3842 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3843 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3844 {
3845 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3846 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3847 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3848 }
3849 }
3850
3851 /* Is it there? */
3852 if (!Desc.Legacy.Gen.u1Present)
3853 {
3854 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3855 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3856 }
3857
3858 /* The base and limit. */
3859 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3860 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3861
3862 /*
3863 * Ok, everything checked out fine. Now set the accessed bit before
3864 * committing the result into the registers.
3865 */
3866 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3867 {
3868 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3869 if (rcStrict != VINF_SUCCESS)
3870 return rcStrict;
3871 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3872 }
3873
3874 /* Commit */
3875 pSReg->Sel = uSel;
3876 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3877 pSReg->u32Limit = cbLimit;
3878 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3879 pSReg->ValidSel = uSel;
3880 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3881 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3882 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3883
3884 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3885 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3886 return VINF_SUCCESS;
3887}
3888
3889
3890/**
3891 * Performs a task switch.
3892 *
3893 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3894 * caller is responsible for performing the necessary checks (like DPL, TSS
3895 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3896 * reference for JMP, CALL, IRET.
3897 *
3898 * If the task switch is due to a software interrupt or hardware exception,
3899 * the caller is responsible for validating the TSS selector and descriptor. See
3900 * Intel Instruction reference for INT n.
3901 *
3902 * @returns VBox strict status code.
3903 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3904 * @param pCtx The CPU context.
3905 * @param enmTaskSwitch What caused this task switch.
3906 * @param uNextEip The EIP effective after the task switch.
3907 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3908 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3909 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3910 * @param SelTSS The TSS selector of the new task.
3911 * @param pNewDescTSS Pointer to the new TSS descriptor.
3912 */
3913IEM_STATIC VBOXSTRICTRC
3914iemTaskSwitch(PVMCPU pVCpu,
3915 PCPUMCTX pCtx,
3916 IEMTASKSWITCH enmTaskSwitch,
3917 uint32_t uNextEip,
3918 uint32_t fFlags,
3919 uint16_t uErr,
3920 uint64_t uCr2,
3921 RTSEL SelTSS,
3922 PIEMSELDESC pNewDescTSS)
3923{
3924 Assert(!IEM_IS_REAL_MODE(pVCpu));
3925 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3926
3927 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3928 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3929 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3930 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3931 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3932
3933 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3934 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3935
3936 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3937 fIsNewTSS386, pCtx->eip, uNextEip));
3938
3939 /* Update CR2 in case it's a page-fault. */
3940 /** @todo This should probably be done much earlier in IEM/PGM. See
3941 * @bugref{5653#c49}. */
3942 if (fFlags & IEM_XCPT_FLAGS_CR2)
3943 pCtx->cr2 = uCr2;
3944
3945 /*
3946 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3947 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3948 */
3949 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3950 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3951 if (uNewTSSLimit < uNewTSSLimitMin)
3952 {
3953 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3954 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3955 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3956 }
3957
3958 /*
3959 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
3960 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
3961 */
3962 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
3963 {
3964 uint32_t const uExitInfo1 = SelTSS;
3965 uint32_t uExitInfo2 = uErr;
3966 switch (enmTaskSwitch)
3967 {
3968 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
3969 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
3970 default: break;
3971 }
3972 if (fFlags & IEM_XCPT_FLAGS_ERR)
3973 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
3974 if (pCtx->eflags.Bits.u1RF)
3975 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
3976
3977 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
3978 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
3979 RT_NOREF2(uExitInfo1, uExitInfo2);
3980 }
3981 /** @todo Nested-VMX task-switch intercept. */
3982
3983 /*
3984 * Check the current TSS limit. The last write to the current TSS during the
3985 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3986 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3987 *
3988 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3989 * end up with smaller than "legal" TSS limits.
3990 */
3991 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3992 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3993 if (uCurTSSLimit < uCurTSSLimitMin)
3994 {
3995 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3996 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3997 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3998 }
3999
4000 /*
4001 * Verify that the new TSS can be accessed and map it. Map only the required contents
4002 * and not the entire TSS.
4003 */
4004 void *pvNewTSS;
4005 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4006 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4007 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4008 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4009 * not perform correct translation if this happens. See Intel spec. 7.2.1
4010 * "Task-State Segment" */
4011 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4012 if (rcStrict != VINF_SUCCESS)
4013 {
4014 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4015 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4016 return rcStrict;
4017 }
4018
4019 /*
4020 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4021 */
4022 uint32_t u32EFlags = pCtx->eflags.u32;
4023 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4024 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4025 {
4026 PX86DESC pDescCurTSS;
4027 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4028 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4029 if (rcStrict != VINF_SUCCESS)
4030 {
4031 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4032 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4033 return rcStrict;
4034 }
4035
4036 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4037 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4038 if (rcStrict != VINF_SUCCESS)
4039 {
4040 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4041 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4042 return rcStrict;
4043 }
4044
4045 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4046 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4047 {
4048 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4049 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4050 u32EFlags &= ~X86_EFL_NT;
4051 }
4052 }
4053
4054 /*
4055 * Save the CPU state into the current TSS.
4056 */
4057 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
4058 if (GCPtrNewTSS == GCPtrCurTSS)
4059 {
4060 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4061 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4062 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
4063 }
4064 if (fIsNewTSS386)
4065 {
4066 /*
4067 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4068 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4069 */
4070 void *pvCurTSS32;
4071 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
4072 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
4073 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4074 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4075 if (rcStrict != VINF_SUCCESS)
4076 {
4077 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4078 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4079 return rcStrict;
4080 }
4081
4082 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4083 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4084 pCurTSS32->eip = uNextEip;
4085 pCurTSS32->eflags = u32EFlags;
4086 pCurTSS32->eax = pCtx->eax;
4087 pCurTSS32->ecx = pCtx->ecx;
4088 pCurTSS32->edx = pCtx->edx;
4089 pCurTSS32->ebx = pCtx->ebx;
4090 pCurTSS32->esp = pCtx->esp;
4091 pCurTSS32->ebp = pCtx->ebp;
4092 pCurTSS32->esi = pCtx->esi;
4093 pCurTSS32->edi = pCtx->edi;
4094 pCurTSS32->es = pCtx->es.Sel;
4095 pCurTSS32->cs = pCtx->cs.Sel;
4096 pCurTSS32->ss = pCtx->ss.Sel;
4097 pCurTSS32->ds = pCtx->ds.Sel;
4098 pCurTSS32->fs = pCtx->fs.Sel;
4099 pCurTSS32->gs = pCtx->gs.Sel;
4100
4101 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4102 if (rcStrict != VINF_SUCCESS)
4103 {
4104 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4105 VBOXSTRICTRC_VAL(rcStrict)));
4106 return rcStrict;
4107 }
4108 }
4109 else
4110 {
4111 /*
4112 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4113 */
4114 void *pvCurTSS16;
4115 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
4116 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
4117 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4118 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4119 if (rcStrict != VINF_SUCCESS)
4120 {
4121 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4122 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4123 return rcStrict;
4124 }
4125
4126 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4127 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4128 pCurTSS16->ip = uNextEip;
4129 pCurTSS16->flags = u32EFlags;
4130 pCurTSS16->ax = pCtx->ax;
4131 pCurTSS16->cx = pCtx->cx;
4132 pCurTSS16->dx = pCtx->dx;
4133 pCurTSS16->bx = pCtx->bx;
4134 pCurTSS16->sp = pCtx->sp;
4135 pCurTSS16->bp = pCtx->bp;
4136 pCurTSS16->si = pCtx->si;
4137 pCurTSS16->di = pCtx->di;
4138 pCurTSS16->es = pCtx->es.Sel;
4139 pCurTSS16->cs = pCtx->cs.Sel;
4140 pCurTSS16->ss = pCtx->ss.Sel;
4141 pCurTSS16->ds = pCtx->ds.Sel;
4142
4143 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4144 if (rcStrict != VINF_SUCCESS)
4145 {
4146 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4147 VBOXSTRICTRC_VAL(rcStrict)));
4148 return rcStrict;
4149 }
4150 }
4151
4152 /*
4153 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4154 */
4155 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4156 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4157 {
4158 /* Whether it's a 16-bit or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
4159 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4160 pNewTSS->selPrev = pCtx->tr.Sel;
4161 }
4162
4163 /*
4164 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
4165 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4166 */
4167 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4168 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4169 bool fNewDebugTrap;
4170 if (fIsNewTSS386)
4171 {
4172 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4173 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4174 uNewEip = pNewTSS32->eip;
4175 uNewEflags = pNewTSS32->eflags;
4176 uNewEax = pNewTSS32->eax;
4177 uNewEcx = pNewTSS32->ecx;
4178 uNewEdx = pNewTSS32->edx;
4179 uNewEbx = pNewTSS32->ebx;
4180 uNewEsp = pNewTSS32->esp;
4181 uNewEbp = pNewTSS32->ebp;
4182 uNewEsi = pNewTSS32->esi;
4183 uNewEdi = pNewTSS32->edi;
4184 uNewES = pNewTSS32->es;
4185 uNewCS = pNewTSS32->cs;
4186 uNewSS = pNewTSS32->ss;
4187 uNewDS = pNewTSS32->ds;
4188 uNewFS = pNewTSS32->fs;
4189 uNewGS = pNewTSS32->gs;
4190 uNewLdt = pNewTSS32->selLdt;
4191 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4192 }
4193 else
4194 {
4195 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4196 uNewCr3 = 0;
4197 uNewEip = pNewTSS16->ip;
4198 uNewEflags = pNewTSS16->flags;
4199 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4200 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4201 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4202 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4203 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4204 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4205 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4206 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4207 uNewES = pNewTSS16->es;
4208 uNewCS = pNewTSS16->cs;
4209 uNewSS = pNewTSS16->ss;
4210 uNewDS = pNewTSS16->ds;
4211 uNewFS = 0;
4212 uNewGS = 0;
4213 uNewLdt = pNewTSS16->selLdt;
4214 fNewDebugTrap = false;
4215 }
4216
4217 if (GCPtrNewTSS == GCPtrCurTSS)
4218 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4219 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4220
4221 /*
4222 * We're done accessing the new TSS.
4223 */
4224 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4225 if (rcStrict != VINF_SUCCESS)
4226 {
4227 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4228 return rcStrict;
4229 }
4230
4231 /*
4232 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4233 */
4234 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4235 {
4236 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4237 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4238 if (rcStrict != VINF_SUCCESS)
4239 {
4240 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4241 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4242 return rcStrict;
4243 }
4244
4245 /* Check that the descriptor indicates the new TSS is available (not busy). */
4246 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4247 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4248 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4249
4250 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4251 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4252 if (rcStrict != VINF_SUCCESS)
4253 {
4254 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4255 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4256 return rcStrict;
4257 }
4258 }
4259
4260 /*
4261 * From this point on, we're technically in the new task. Exceptions raised here are deferred
4262 * until the task switch completes, but are delivered before executing any instructions in the new task.
4263 */
4264 pCtx->tr.Sel = SelTSS;
4265 pCtx->tr.ValidSel = SelTSS;
4266 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
4267 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4268 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4269 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4270 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4271
4272 /* Set the busy bit in TR. */
4273 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4274 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4275 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4276 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4277 {
4278 uNewEflags |= X86_EFL_NT;
4279 }
4280
4281 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4282 pCtx->cr0 |= X86_CR0_TS;
4283 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4284
4285 pCtx->eip = uNewEip;
4286 pCtx->eax = uNewEax;
4287 pCtx->ecx = uNewEcx;
4288 pCtx->edx = uNewEdx;
4289 pCtx->ebx = uNewEbx;
4290 pCtx->esp = uNewEsp;
4291 pCtx->ebp = uNewEbp;
4292 pCtx->esi = uNewEsi;
4293 pCtx->edi = uNewEdi;
4294
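 /* Keep only the architecturally defined EFLAGS bits and force the reserved always-one bit (bit 1). */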
4295 uNewEflags &= X86_EFL_LIVE_MASK;
4296 uNewEflags |= X86_EFL_RA1_MASK;
4297 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
4298
4299 /*
4300 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4301 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4302 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4303 */
4304 pCtx->es.Sel = uNewES;
4305 pCtx->es.Attr.u &= ~X86DESCATTR_P;
4306
4307 pCtx->cs.Sel = uNewCS;
4308 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
4309
4310 pCtx->ss.Sel = uNewSS;
4311 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
4312
4313 pCtx->ds.Sel = uNewDS;
4314 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
4315
4316 pCtx->fs.Sel = uNewFS;
4317 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
4318
4319 pCtx->gs.Sel = uNewGS;
4320 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
4321 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4322
4323 pCtx->ldtr.Sel = uNewLdt;
4324 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4325 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
4326 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4327
4328 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4329 {
4330 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
4331 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
4332 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
4333 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
4334 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
4335 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
4336 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4337 }
4338
4339 /*
4340 * Switch CR3 for the new task.
4341 */
4342 if ( fIsNewTSS386
4343 && (pCtx->cr0 & X86_CR0_PG))
4344 {
4345 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4346 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4347 {
4348 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4349 AssertRCSuccessReturn(rc, rc);
4350 }
4351 else
4352 pCtx->cr3 = uNewCr3;
4353
4354 /* Inform PGM. */
4355 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4356 {
4357 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4358 AssertRCReturn(rc, rc);
4359 /* ignore informational status codes */
4360 }
4361 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4362 }
4363
4364 /*
4365 * Switch LDTR for the new task.
4366 */
4367 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4368 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4369 else
4370 {
4371 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4372
4373 IEMSELDESC DescNewLdt;
4374 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4375 if (rcStrict != VINF_SUCCESS)
4376 {
4377 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4378 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4379 return rcStrict;
4380 }
4381 if ( !DescNewLdt.Legacy.Gen.u1Present
4382 || DescNewLdt.Legacy.Gen.u1DescType
4383 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4384 {
4385 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4386 uNewLdt, DescNewLdt.Legacy.u));
4387 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4388 }
4389
4390 pCtx->ldtr.ValidSel = uNewLdt;
4391 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4392 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4393 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4394 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4395 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4396 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4397 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4398 }
4399
4400 IEMSELDESC DescSS;
4401 if (IEM_IS_V86_MODE(pVCpu))
4402 {
4403 pVCpu->iem.s.uCpl = 3;
4404 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4405 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4406 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4407 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4408 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4409 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4410
4411 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4412 DescSS.Legacy.u = 0;
4413 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4414 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4415 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4416 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4417 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4418 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4419 DescSS.Legacy.Gen.u2Dpl = 3;
4420 }
4421 else
4422 {
4423 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4424
4425 /*
4426 * Load the stack segment for the new task.
4427 */
4428 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4429 {
4430 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4431 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4432 }
4433
4434 /* Fetch the descriptor. */
4435 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4436 if (rcStrict != VINF_SUCCESS)
4437 {
4438 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4439 VBOXSTRICTRC_VAL(rcStrict)));
4440 return rcStrict;
4441 }
4442
4443 /* SS must be a writable data segment. */
4444 if ( !DescSS.Legacy.Gen.u1DescType
4445 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4446 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4447 {
4448 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4449 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4450 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4451 }
4452
4453 /* SS.RPL, SS.DPL and CS.RPL (the new CPL) must all be equal. */
4454 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4455 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4456 {
4457 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4458 uNewCpl));
4459 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4460 }
4461
4462 /* Is it there? */
4463 if (!DescSS.Legacy.Gen.u1Present)
4464 {
4465 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4466 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4467 }
4468
4469 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4470 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4471
4472 /* Set the accessed bit before committing the result into SS. */
4473 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4474 {
4475 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4476 if (rcStrict != VINF_SUCCESS)
4477 return rcStrict;
4478 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4479 }
4480
4481 /* Commit SS. */
4482 pCtx->ss.Sel = uNewSS;
4483 pCtx->ss.ValidSel = uNewSS;
4484 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4485 pCtx->ss.u32Limit = cbLimit;
4486 pCtx->ss.u64Base = u64Base;
4487 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4488 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4489
4490 /* CPL has changed, update IEM before loading rest of segments. */
4491 pVCpu->iem.s.uCpl = uNewCpl;
4492
4493 /*
4494 * Load the data segments for the new task.
4495 */
4496 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4497 if (rcStrict != VINF_SUCCESS)
4498 return rcStrict;
4499 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4500 if (rcStrict != VINF_SUCCESS)
4501 return rcStrict;
4502 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4503 if (rcStrict != VINF_SUCCESS)
4504 return rcStrict;
4505 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4506 if (rcStrict != VINF_SUCCESS)
4507 return rcStrict;
4508
4509 /*
4510 * Load the code segment for the new task.
4511 */
4512 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4513 {
4514 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4515 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4516 }
4517
4518 /* Fetch the descriptor. */
4519 IEMSELDESC DescCS;
4520 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4521 if (rcStrict != VINF_SUCCESS)
4522 {
4523 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4524 return rcStrict;
4525 }
4526
4527 /* CS must be a code segment. */
4528 if ( !DescCS.Legacy.Gen.u1DescType
4529 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4530 {
4531 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4532 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4533 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4534 }
4535
4536 /* For conforming CS, DPL must be less than or equal to the RPL. */
4537 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4538 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4539 {
4540 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4541 DescCS.Legacy.Gen.u2Dpl));
4542 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4543 }
4544
4545 /* For non-conforming CS, DPL must match RPL. */
4546 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4547 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4548 {
4549 Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4550 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4551 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4552 }
4553
4554 /* Is it there? */
4555 if (!DescCS.Legacy.Gen.u1Present)
4556 {
4557 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4558 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4559 }
4560
4561 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4562 u64Base = X86DESC_BASE(&DescCS.Legacy);
4563
4564 /* Set the accessed bit before committing the result into CS. */
4565 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4566 {
4567 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4568 if (rcStrict != VINF_SUCCESS)
4569 return rcStrict;
4570 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4571 }
4572
4573 /* Commit CS. */
4574 pCtx->cs.Sel = uNewCS;
4575 pCtx->cs.ValidSel = uNewCS;
4576 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4577 pCtx->cs.u32Limit = cbLimit;
4578 pCtx->cs.u64Base = u64Base;
4579 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4580 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4581 }
4582
4583 /** @todo Debug trap. */
4584 if (fIsNewTSS386 && fNewDebugTrap)
4585 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4586
4587 /*
4588 * Construct the error code masks based on what caused this task switch.
4589 * See Intel Instruction reference for INT.
4590 */
4591 uint16_t uExt;
4592 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4593 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4594 {
4595 uExt = 1;
4596 }
4597 else
4598 uExt = 0;
4599
4600 /*
4601 * Push any error code on to the new stack.
4602 */
4603 if (fFlags & IEM_XCPT_FLAGS_ERR)
4604 {
4605 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4606 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4607 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4608
4609 /* Check that there is sufficient space on the stack. */
4610 /** @todo Factor out segment limit checking for normal/expand down segments
4611 * into a separate function. */
4612 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4613 {
4614 if ( pCtx->esp - 1 > cbLimitSS
4615 || pCtx->esp < cbStackFrame)
4616 {
4617 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4618 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4619 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4620 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4621 }
4622 }
4623 else
4624 {
4625 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4626 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4627 {
4628 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4629 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4630 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4631 }
4632 }
4633
4634
4635 if (fIsNewTSS386)
4636 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4637 else
4638 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4639 if (rcStrict != VINF_SUCCESS)
4640 {
4641 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4642 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4643 return rcStrict;
4644 }
4645 }
4646
4647 /* Check the new EIP against the new CS limit. */
4648 if (pCtx->eip > pCtx->cs.u32Limit)
4649 {
4650 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4651 pCtx->eip, pCtx->cs.u32Limit));
4652 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4653 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4654 }
4655
4656 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4657 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4658}
4659
4660
4661/**
4662 * Implements exceptions and interrupts for protected mode.
4663 *
4664 * @returns VBox strict status code.
4665 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4666 * @param pCtx The CPU context.
4667 * @param cbInstr The number of bytes to offset rIP by in the return
4668 * address.
4669 * @param u8Vector The interrupt / exception vector number.
4670 * @param fFlags The flags.
4671 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4672 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4673 */
4674IEM_STATIC VBOXSTRICTRC
4675iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4676 PCPUMCTX pCtx,
4677 uint8_t cbInstr,
4678 uint8_t u8Vector,
4679 uint32_t fFlags,
4680 uint16_t uErr,
4681 uint64_t uCr2)
4682{
4683 /*
4684 * Read the IDT entry.
4685 */
4686 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4687 {
4688 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4689 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4690 }
4691 X86DESC Idte;
4692 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4693 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4694 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4695 return rcStrict;
4696 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4697 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4698 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4699
4700 /*
4701 * Check the descriptor type, DPL and such.
4702 * ASSUMES this is done in the same order as described for call-gate calls.
4703 */
4704 if (Idte.Gate.u1DescType)
4705 {
4706 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4707 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4708 }
4709 bool fTaskGate = false;
4710 uint8_t f32BitGate = true;
4711 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4712 switch (Idte.Gate.u4Type)
4713 {
4714 case X86_SEL_TYPE_SYS_UNDEFINED:
4715 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4716 case X86_SEL_TYPE_SYS_LDT:
4717 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4718 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4719 case X86_SEL_TYPE_SYS_UNDEFINED2:
4720 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4721 case X86_SEL_TYPE_SYS_UNDEFINED3:
4722 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4723 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4724 case X86_SEL_TYPE_SYS_UNDEFINED4:
4725 {
4726 /** @todo check what actually happens when the type is wrong...
4727 * esp. call gates. */
4728 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4729 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4730 }
4731
4732 case X86_SEL_TYPE_SYS_286_INT_GATE:
4733 f32BitGate = false;
4734 RT_FALL_THRU();
4735 case X86_SEL_TYPE_SYS_386_INT_GATE:
4736 fEflToClear |= X86_EFL_IF;
4737 break;
4738
4739 case X86_SEL_TYPE_SYS_TASK_GATE:
4740 fTaskGate = true;
4741#ifndef IEM_IMPLEMENTS_TASKSWITCH
4742 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4743#endif
4744 break;
4745
4746 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4747 f32BitGate = false;
     RT_FALL_THRU();
4748 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4749 break;
4750
4751 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4752 }
4753
4754 /* Check DPL against CPL if applicable. */
4755 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4756 {
4757 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4758 {
4759 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4760 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4761 }
4762 }
4763
4764 /* Is it there? */
4765 if (!Idte.Gate.u1Present)
4766 {
4767 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4768 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4769 }
4770
4771 /* Is it a task-gate? */
4772 if (fTaskGate)
4773 {
4774 /*
4775 * Construct the error code masks based on what caused this task switch.
4776 * See Intel Instruction reference for INT.
4777 */
4778 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4779 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4780 RTSEL SelTSS = Idte.Gate.u16Sel;
4781
4782 /*
4783 * Fetch the TSS descriptor in the GDT.
4784 */
4785 IEMSELDESC DescTSS;
4786 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4787 if (rcStrict != VINF_SUCCESS)
4788 {
4789 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4790 VBOXSTRICTRC_VAL(rcStrict)));
4791 return rcStrict;
4792 }
4793
4794 /* The TSS descriptor must be a system segment and be available (not busy). */
4795 if ( DescTSS.Legacy.Gen.u1DescType
4796 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4797 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4798 {
4799 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4800 u8Vector, SelTSS, DescTSS.Legacy.au64));
4801 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4802 }
4803
4804 /* The TSS must be present. */
4805 if (!DescTSS.Legacy.Gen.u1Present)
4806 {
4807 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4808 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4809 }
4810
4811 /* Do the actual task switch. */
4812 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4813 }
4814
4815 /* A null CS is bad. */
4816 RTSEL NewCS = Idte.Gate.u16Sel;
4817 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4818 {
4819 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4820 return iemRaiseGeneralProtectionFault0(pVCpu);
4821 }
4822
4823 /* Fetch the descriptor for the new CS. */
4824 IEMSELDESC DescCS;
4825 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4826 if (rcStrict != VINF_SUCCESS)
4827 {
4828 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4829 return rcStrict;
4830 }
4831
4832 /* Must be a code segment. */
4833 if (!DescCS.Legacy.Gen.u1DescType)
4834 {
4835 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4836 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4837 }
4838 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4839 {
4840 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4841 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4842 }
4843
4844 /* Don't allow lowering the privilege level. */
4845 /** @todo Does the lowering of privileges apply to software interrupts
4846 * only? This has bearings on the more-privileged or
4847 * same-privilege stack behavior further down. A testcase would
4848 * be nice. */
4849 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4850 {
4851 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4852 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4853 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4854 }
4855
4856 /* Make sure the selector is present. */
4857 if (!DescCS.Legacy.Gen.u1Present)
4858 {
4859 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4860 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4861 }
4862
4863 /* Check the new EIP against the new CS limit. */
4864 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4865 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4866 ? Idte.Gate.u16OffsetLow
4867 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4868 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4869 if (uNewEip > cbLimitCS)
4870 {
4871 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4872 u8Vector, uNewEip, cbLimitCS, NewCS));
4873 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4874 }
4875 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4876
4877 /* Calc the flag image to push. */
4878 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4879 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4880 fEfl &= ~X86_EFL_RF;
4881 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4882 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4883
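    /* Note: a conforming code segment keeps the current CPL, while a non-conforming
       one runs the handler at its own DPL (see also the long mode variant below). */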
4884 /* From V8086 mode only go to CPL 0. */
4885 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4886 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4887 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4888 {
4889 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4890 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4891 }
4892
4893 /*
4894 * If the privilege level changes, we need to get a new stack from the TSS.
4895 * This in turns means validating the new SS and ESP...
4896 */
4897 if (uNewCpl != pVCpu->iem.s.uCpl)
4898 {
4899 RTSEL NewSS;
4900 uint32_t uNewEsp;
4901 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4902 if (rcStrict != VINF_SUCCESS)
4903 return rcStrict;
4904
4905 IEMSELDESC DescSS;
4906 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4907 if (rcStrict != VINF_SUCCESS)
4908 return rcStrict;
4909 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4910 if (!DescSS.Legacy.Gen.u1DefBig)
4911 {
4912 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4913 uNewEsp = (uint16_t)uNewEsp;
4914 }
4915
4916 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4917
4918 /* Check that there is sufficient space for the stack frame. */
4919 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
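        /* Note: the inner-level frame is EIP, CS, EFLAGS, ESP and SS (5 entries), plus
           ES, DS, FS and GS when coming from V8086 mode (9 entries), plus an optional
           error code entry; each entry is 2 bytes for a 16-bit gate and 4 bytes for a
           32-bit gate, hence the shift by f32BitGate. */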
4920 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4921 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4922 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4923
4924 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4925 {
4926 if ( uNewEsp - 1 > cbLimitSS
4927 || uNewEsp < cbStackFrame)
4928 {
4929 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4930 u8Vector, NewSS, uNewEsp, cbStackFrame));
4931 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4932 }
4933 }
4934 else
4935 {
4936 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4937 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4938 {
4939 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4940 u8Vector, NewSS, uNewEsp, cbStackFrame));
4941 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4942 }
4943 }
4944
4945 /*
4946 * Start making changes.
4947 */
4948
4949 /* Set the new CPL so that stack accesses use it. */
4950 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4951 pVCpu->iem.s.uCpl = uNewCpl;
4952
4953 /* Create the stack frame. */
4954 RTPTRUNION uStackFrame;
4955 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4956 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4957 if (rcStrict != VINF_SUCCESS)
4958 return rcStrict;
4959 void * const pvStackFrame = uStackFrame.pv;
4960 if (f32BitGate)
4961 {
4962 if (fFlags & IEM_XCPT_FLAGS_ERR)
4963 *uStackFrame.pu32++ = uErr;
4964 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4965 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4966 uStackFrame.pu32[2] = fEfl;
4967 uStackFrame.pu32[3] = pCtx->esp;
4968 uStackFrame.pu32[4] = pCtx->ss.Sel;
4969 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
4970 if (fEfl & X86_EFL_VM)
4971 {
4972 uStackFrame.pu32[1] = pCtx->cs.Sel;
4973 uStackFrame.pu32[5] = pCtx->es.Sel;
4974 uStackFrame.pu32[6] = pCtx->ds.Sel;
4975 uStackFrame.pu32[7] = pCtx->fs.Sel;
4976 uStackFrame.pu32[8] = pCtx->gs.Sel;
4977 }
4978 }
4979 else
4980 {
4981 if (fFlags & IEM_XCPT_FLAGS_ERR)
4982 *uStackFrame.pu16++ = uErr;
4983 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4984 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4985 uStackFrame.pu16[2] = fEfl;
4986 uStackFrame.pu16[3] = pCtx->sp;
4987 uStackFrame.pu16[4] = pCtx->ss.Sel;
4988 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
4989 if (fEfl & X86_EFL_VM)
4990 {
4991 uStackFrame.pu16[1] = pCtx->cs.Sel;
4992 uStackFrame.pu16[5] = pCtx->es.Sel;
4993 uStackFrame.pu16[6] = pCtx->ds.Sel;
4994 uStackFrame.pu16[7] = pCtx->fs.Sel;
4995 uStackFrame.pu16[8] = pCtx->gs.Sel;
4996 }
4997 }
4998 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4999 if (rcStrict != VINF_SUCCESS)
5000 return rcStrict;
5001
5002 /* Mark the selectors 'accessed' (hope this is the correct time). */
5003 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5004 * after pushing the stack frame? (Write protect the gdt + stack to
5005 * find out.) */
5006 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5007 {
5008 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5009 if (rcStrict != VINF_SUCCESS)
5010 return rcStrict;
5011 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5012 }
5013
5014 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5015 {
5016 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5017 if (rcStrict != VINF_SUCCESS)
5018 return rcStrict;
5019 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5020 }
5021
5022 /*
5023 * Start committing the register changes (joins with the DPL=CPL branch).
5024 */
5025 pCtx->ss.Sel = NewSS;
5026 pCtx->ss.ValidSel = NewSS;
5027 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5028 pCtx->ss.u32Limit = cbLimitSS;
5029 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5030 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5031 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5032 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5033 * SP is loaded).
5034 * Need to check the other combinations too:
5035 * - 16-bit TSS, 32-bit handler
5036 * - 32-bit TSS, 16-bit handler */
5037 if (!pCtx->ss.Attr.n.u1DefBig)
5038 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
5039 else
5040 pCtx->rsp = uNewEsp - cbStackFrame;
5041
5042 if (fEfl & X86_EFL_VM)
5043 {
5044 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
5045 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
5046 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
5047 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
5048 }
5049 }
5050 /*
5051 * Same privilege, no stack change and smaller stack frame.
5052 */
5053 else
5054 {
5055 uint64_t uNewRsp;
5056 RTPTRUNION uStackFrame;
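        /* Note: the same-privilege frame is EIP, CS and EFLAGS (3 entries), plus an
           optional error code entry; 2 bytes per entry for a 16-bit gate, 4 bytes for
           a 32-bit gate. */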
5057 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
5058 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5059 if (rcStrict != VINF_SUCCESS)
5060 return rcStrict;
5061 void * const pvStackFrame = uStackFrame.pv;
5062
5063 if (f32BitGate)
5064 {
5065 if (fFlags & IEM_XCPT_FLAGS_ERR)
5066 *uStackFrame.pu32++ = uErr;
5067 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5068 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5069 uStackFrame.pu32[2] = fEfl;
5070 }
5071 else
5072 {
5073 if (fFlags & IEM_XCPT_FLAGS_ERR)
5074 *uStackFrame.pu16++ = uErr;
5075 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5076 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5077 uStackFrame.pu16[2] = fEfl;
5078 }
5079 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5080 if (rcStrict != VINF_SUCCESS)
5081 return rcStrict;
5082
5083 /* Mark the CS selector as 'accessed'. */
5084 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5085 {
5086 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5087 if (rcStrict != VINF_SUCCESS)
5088 return rcStrict;
5089 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5090 }
5091
5092 /*
5093 * Start committing the register changes (joins with the other branch).
5094 */
5095 pCtx->rsp = uNewRsp;
5096 }
5097
5098 /* ... register committing continues. */
5099 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5100 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5101 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5102 pCtx->cs.u32Limit = cbLimitCS;
5103 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5104 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5105
5106 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5107 fEfl &= ~fEflToClear;
5108 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5109
5110 if (fFlags & IEM_XCPT_FLAGS_CR2)
5111 pCtx->cr2 = uCr2;
5112
5113 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5114 iemRaiseXcptAdjustState(pCtx, u8Vector);
5115
5116 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5117}
5118
5119
5120/**
5121 * Implements exceptions and interrupts for long mode.
5122 *
5123 * @returns VBox strict status code.
5124 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5125 * @param pCtx The CPU context.
5126 * @param cbInstr The number of bytes to offset rIP by in the return
5127 * address.
5128 * @param u8Vector The interrupt / exception vector number.
5129 * @param fFlags The flags.
5130 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5131 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5132 */
5133IEM_STATIC VBOXSTRICTRC
5134iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5135 PCPUMCTX pCtx,
5136 uint8_t cbInstr,
5137 uint8_t u8Vector,
5138 uint32_t fFlags,
5139 uint16_t uErr,
5140 uint64_t uCr2)
5141{
5142 /*
5143 * Read the IDT entry.
5144 */
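    /* Note: long mode IDT entries are 16 bytes each, hence vector * 16. */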
5145 uint16_t offIdt = (uint16_t)u8Vector << 4;
5146 if (pCtx->idtr.cbIdt < offIdt + 7)
5147 {
5148 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
5149 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5150 }
5151 X86DESC64 Idte;
5152 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
5153 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5154 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
5155 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5156 return rcStrict;
5157 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5158 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5159 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5160
5161 /*
5162 * Check the descriptor type, DPL and such.
5163 * ASSUMES this is done in the same order as described for call-gate calls.
5164 */
5165 if (Idte.Gate.u1DescType)
5166 {
5167 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5168 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5169 }
5170 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5171 switch (Idte.Gate.u4Type)
5172 {
5173 case AMD64_SEL_TYPE_SYS_INT_GATE:
5174 fEflToClear |= X86_EFL_IF;
5175 break;
5176 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5177 break;
5178
5179 default:
5180 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5181 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5182 }
5183
5184 /* Check DPL against CPL if applicable. */
5185 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5186 {
5187 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5188 {
5189 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5190 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5191 }
5192 }
5193
5194 /* Is it there? */
5195 if (!Idte.Gate.u1Present)
5196 {
5197 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5198 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5199 }
5200
5201 /* A null CS is bad. */
5202 RTSEL NewCS = Idte.Gate.u16Sel;
5203 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5204 {
5205 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5206 return iemRaiseGeneralProtectionFault0(pVCpu);
5207 }
5208
5209 /* Fetch the descriptor for the new CS. */
5210 IEMSELDESC DescCS;
5211 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5212 if (rcStrict != VINF_SUCCESS)
5213 {
5214 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5215 return rcStrict;
5216 }
5217
5218 /* Must be a 64-bit code segment. */
5219 if (!DescCS.Long.Gen.u1DescType)
5220 {
5221 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5222 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5223 }
5224 if ( !DescCS.Long.Gen.u1Long
5225 || DescCS.Long.Gen.u1DefBig
5226 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5227 {
5228 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5229 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5230 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5231 }
5232
5233 /* Don't allow lowering the privilege level. For non-conforming CS
5234 selectors, the CS.DPL sets the privilege level the trap/interrupt
5235 handler runs at. For conforming CS selectors, the CPL remains
5236 unchanged, but the CS.DPL must be <= CPL. */
5237 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5238 * when CPU in Ring-0. Result \#GP? */
5239 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5240 {
5241 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5242 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5243 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5244 }
5245
5246
5247 /* Make sure the selector is present. */
5248 if (!DescCS.Legacy.Gen.u1Present)
5249 {
5250 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5251 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5252 }
5253
5254 /* Check that the new RIP is canonical. */
5255 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5256 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5257 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5258 if (!IEM_IS_CANONICAL(uNewRip))
5259 {
5260 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5261 return iemRaiseGeneralProtectionFault0(pVCpu);
5262 }
5263
5264 /*
5265 * If the privilege level changes or if the IST isn't zero, we need to get
5266 * a new stack from the TSS.
5267 */
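    /* Note: a non-zero IST value (1-7) selects one of the seven interrupt stack table
       pointers in the 64-bit TSS; zero means the stack is picked by the target CPL
       (RSP0..RSP2) when the privilege level changes. */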
5268 uint64_t uNewRsp;
5269 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5270 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5271 if ( uNewCpl != pVCpu->iem.s.uCpl
5272 || Idte.Gate.u3IST != 0)
5273 {
5274 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5275 if (rcStrict != VINF_SUCCESS)
5276 return rcStrict;
5277 }
5278 else
5279 uNewRsp = pCtx->rsp;
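    /* Note: the CPU aligns the new stack pointer down to a 16-byte boundary before
       pushing the 64-bit interrupt frame. */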
5280 uNewRsp &= ~(uint64_t)0xf;
5281
5282 /*
5283 * Calc the flag image to push.
5284 */
5285 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
5286 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5287 fEfl &= ~X86_EFL_RF;
5288 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
5289 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5290
5291 /*
5292 * Start making changes.
5293 */
5294 /* Set the new CPL so that stack accesses use it. */
5295 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5296 pVCpu->iem.s.uCpl = uNewCpl;
5297
5298 /* Create the stack frame. */
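    /* Note: the 64-bit frame always holds SS, RSP, RFLAGS, CS and RIP (5 qwords),
       plus an optional error code qword. */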
5299 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
5300 RTPTRUNION uStackFrame;
5301 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5302 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5303 if (rcStrict != VINF_SUCCESS)
5304 return rcStrict;
5305 void * const pvStackFrame = uStackFrame.pv;
5306
5307 if (fFlags & IEM_XCPT_FLAGS_ERR)
5308 *uStackFrame.pu64++ = uErr;
5309 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
5310 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5311 uStackFrame.pu64[2] = fEfl;
5312 uStackFrame.pu64[3] = pCtx->rsp;
5313 uStackFrame.pu64[4] = pCtx->ss.Sel;
5314 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5315 if (rcStrict != VINF_SUCCESS)
5316 return rcStrict;
5317
5318 /* Mark the CS selector as 'accessed' (hope this is the correct time). */
5319 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5320 * after pushing the stack frame? (Write protect the gdt + stack to
5321 * find out.) */
5322 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5323 {
5324 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5325 if (rcStrict != VINF_SUCCESS)
5326 return rcStrict;
5327 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5328 }
5329
5330 /*
5331 * Start committing the register changes.
5332 */
5333 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
5334 * hidden registers when interrupting 32-bit or 16-bit code! */
5335 if (uNewCpl != uOldCpl)
5336 {
5337 pCtx->ss.Sel = 0 | uNewCpl;
5338 pCtx->ss.ValidSel = 0 | uNewCpl;
5339 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5340 pCtx->ss.u32Limit = UINT32_MAX;
5341 pCtx->ss.u64Base = 0;
5342 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5343 }
5344 pCtx->rsp = uNewRsp - cbStackFrame;
5345 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5346 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5347 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5348 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5349 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5350 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5351 pCtx->rip = uNewRip;
5352
5353 fEfl &= ~fEflToClear;
5354 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5355
5356 if (fFlags & IEM_XCPT_FLAGS_CR2)
5357 pCtx->cr2 = uCr2;
5358
5359 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5360 iemRaiseXcptAdjustState(pCtx, u8Vector);
5361
5362 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5363}
5364
5365
5366/**
5367 * Implements exceptions and interrupts.
5368 *
5369 * All exceptions and interrupts go thru this function!
5370 *
5371 * @returns VBox strict status code.
5372 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5373 * @param cbInstr The number of bytes to offset rIP by in the return
5374 * address.
5375 * @param u8Vector The interrupt / exception vector number.
5376 * @param fFlags The flags.
5377 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5378 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5379 */
5380DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5381iemRaiseXcptOrInt(PVMCPU pVCpu,
5382 uint8_t cbInstr,
5383 uint8_t u8Vector,
5384 uint32_t fFlags,
5385 uint16_t uErr,
5386 uint64_t uCr2)
5387{
5388 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5389#ifdef IN_RING0
5390 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5391 AssertRCReturn(rc, rc);
5392#endif
5393
5394#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5395 /*
5396 * Flush prefetch buffer
5397 */
5398 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5399#endif
5400
5401 /*
5402 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5403 */
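    /* Note: without VME, a software INT n executed in V8086 mode at IOPL < 3 must raise
       #GP(0) rather than go through the IDT; INT3 (IEM_XCPT_FLAGS_BP_INSTR) is exempt
       from this check. */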
5404 if ( pCtx->eflags.Bits.u1VM
5405 && pCtx->eflags.Bits.u2IOPL != 3
5406 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5407 && (pCtx->cr0 & X86_CR0_PE) )
5408 {
5409 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5410 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5411 u8Vector = X86_XCPT_GP;
5412 uErr = 0;
5413 }
5414#ifdef DBGFTRACE_ENABLED
5415 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5416 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5417 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5418#endif
5419
5420#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5421 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
5422 {
5423 /*
5424 * If the event is being injected as part of VMRUN, it isn't subject to event
5425 * intercepts in the nested-guest. However, secondary exceptions that occur
5426 * during injection of any event -are- subject to exception intercepts.
5427 * See AMD spec. 15.20 "Event Injection".
5428 */
5429 if (!pCtx->hwvirt.svm.fInterceptEvents)
5430 pCtx->hwvirt.svm.fInterceptEvents = 1;
5431 else
5432 {
5433 /*
5434 * Check and handle if the event being raised is intercepted.
5435 */
5436 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, pCtx, u8Vector, fFlags, uErr, uCr2);
5437 if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
5438 return rcStrict0;
5439 }
5440 }
5441#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
5442
5443 /*
5444 * Do recursion accounting.
5445 */
5446 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5447 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5448 if (pVCpu->iem.s.cXcptRecursions == 0)
5449 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5450 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5451 else
5452 {
5453 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5454 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5455 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5456
5457 if (pVCpu->iem.s.cXcptRecursions >= 3)
5458 {
5459#ifdef DEBUG_bird
5460 AssertFailed();
5461#endif
5462 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5463 }
5464
5465 /*
5466 * Evaluate the sequence of recurring events.
5467 */
5468 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5469 NULL /* pXcptRaiseInfo */);
5470 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5471 { /* likely */ }
5472 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5473 {
5474 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5475 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5476 u8Vector = X86_XCPT_DF;
5477 uErr = 0;
5478 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5479 if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5480 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5481 }
5482 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5483 {
5484 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5485 return iemInitiateCpuShutdown(pVCpu);
5486 }
5487 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5488 {
5489 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5490 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5491 if (!CPUMIsGuestInNestedHwVirtMode(pCtx))
5492 return VERR_EM_GUEST_CPU_HANG;
5493 }
5494 else
5495 {
5496 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5497 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5498 return VERR_IEM_IPE_9;
5499 }
5500
5501 /*
5502 * The 'EXT' bit is set when an exception occurs during the delivery of an external
5503 * event (such as an interrupt or an earlier exception)[1]. The privileged software
5504 * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by the
5505 * software interrupt instructions (INT n, INT3 and INTO), the 'EXT' bit is not set[3].
5506 *
5507 * [1] - Intel spec. 6.13 "Error Code"
5508 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5509 * [3] - Intel Instruction reference for INT n.
5510 */
5511 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5512 && (fFlags & IEM_XCPT_FLAGS_ERR)
5513 && u8Vector != X86_XCPT_PF
5514 && u8Vector != X86_XCPT_DF)
5515 {
5516 uErr |= X86_TRAP_ERR_EXTERNAL;
5517 }
5518 }
5519
5520 pVCpu->iem.s.cXcptRecursions++;
5521 pVCpu->iem.s.uCurXcpt = u8Vector;
5522 pVCpu->iem.s.fCurXcpt = fFlags;
5523 pVCpu->iem.s.uCurXcptErr = uErr;
5524 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5525
5526 /*
5527 * Extensive logging.
5528 */
5529#if defined(LOG_ENABLED) && defined(IN_RING3)
5530 if (LogIs3Enabled())
5531 {
5532 PVM pVM = pVCpu->CTX_SUFF(pVM);
5533 char szRegs[4096];
5534 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5535 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5536 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5537 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5538 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5539 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5540 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5541 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5542 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5543 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5544 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5545 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5546 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5547 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5548 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5549 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5550 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5551 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5552 " efer=%016VR{efer}\n"
5553 " pat=%016VR{pat}\n"
5554 " sf_mask=%016VR{sf_mask}\n"
5555 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5556 " lstar=%016VR{lstar}\n"
5557 " star=%016VR{star} cstar=%016VR{cstar}\n"
5558 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5559 );
5560
5561 char szInstr[256];
5562 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5563 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5564 szInstr, sizeof(szInstr), NULL);
5565 Log3(("%s%s\n", szRegs, szInstr));
5566 }
5567#endif /* LOG_ENABLED */
5568
5569 /*
5570 * Call the mode specific worker function.
5571 */
5572 VBOXSTRICTRC rcStrict;
5573 if (!(pCtx->cr0 & X86_CR0_PE))
5574 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5575 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5576 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5577 else
5578 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5579
5580 /* Flush the prefetch buffer. */
5581#ifdef IEM_WITH_CODE_TLB
5582 pVCpu->iem.s.pbInstrBuf = NULL;
5583#else
5584 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5585#endif
5586
5587 /*
5588 * Unwind.
5589 */
5590 pVCpu->iem.s.cXcptRecursions--;
5591 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5592 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5593 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5594 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5595 return rcStrict;
5596}
5597
5598#ifdef IEM_WITH_SETJMP
5599/**
5600 * See iemRaiseXcptOrInt. Will not return.
5601 */
5602IEM_STATIC DECL_NO_RETURN(void)
5603iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5604 uint8_t cbInstr,
5605 uint8_t u8Vector,
5606 uint32_t fFlags,
5607 uint16_t uErr,
5608 uint64_t uCr2)
5609{
5610 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5611 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5612}
5613#endif
5614
5615
5616/** \#DE - 00. */
5617DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5618{
5619 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5620}
5621
5622
5623/** \#DB - 01.
5624 * @note This automatically clears DR7.GD. */
5625DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5626{
5627 /** @todo set/clear RF. */
5628 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5629 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5630}
5631
5632
5633/** \#BR - 05. */
5634DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5635{
5636 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5637}
5638
5639
5640/** \#UD - 06. */
5641DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5642{
5643 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5644}
5645
5646
5647/** \#NM - 07. */
5648DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5649{
5650 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5651}
5652
5653
5654/** \#TS(err) - 0a. */
5655DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5656{
5657 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5658}
5659
5660
5661/** \#TS(tr) - 0a. */
5662DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5663{
5664 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5665 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5666}
5667
5668
5669/** \#TS(0) - 0a. */
5670DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5671{
5672 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5673 0, 0);
5674}
5675
5676
5677/** \#TS(err) - 0a. */
5678DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5679{
5680 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5681 uSel & X86_SEL_MASK_OFF_RPL, 0);
5682}
5683
5684
5685/** \#NP(err) - 0b. */
5686DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5687{
5688 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5689}
5690
5691
5692/** \#NP(sel) - 0b. */
5693DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5694{
5695 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5696 uSel & ~X86_SEL_RPL, 0);
5697}
5698
5699
5700/** \#SS(seg) - 0c. */
5701DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5702{
5703 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5704 uSel & ~X86_SEL_RPL, 0);
5705}
5706
5707
5708/** \#SS(err) - 0c. */
5709DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5710{
5711 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5712}
5713
5714
5715/** \#GP(n) - 0d. */
5716DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5717{
5718 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5719}
5720
5721
5722/** \#GP(0) - 0d. */
5723DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5724{
5725 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5726}
5727
5728#ifdef IEM_WITH_SETJMP
5729/** \#GP(0) - 0d. */
5730DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5731{
5732 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5733}
5734#endif
5735
5736
5737/** \#GP(sel) - 0d. */
5738DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5739{
5740 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5741 Sel & ~X86_SEL_RPL, 0);
5742}
5743
5744
5745/** \#GP(0) - 0d. */
5746DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5747{
5748 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5749}
5750
5751
5752/** \#GP(sel) - 0d. */
5753DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5754{
5755 NOREF(iSegReg); NOREF(fAccess);
5756 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5757 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5758}
5759
5760#ifdef IEM_WITH_SETJMP
5761/** \#GP(sel) - 0d, longjmp. */
5762DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5763{
5764 NOREF(iSegReg); NOREF(fAccess);
5765 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5766 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5767}
5768#endif
5769
5770/** \#GP(sel) - 0d. */
5771DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5772{
5773 NOREF(Sel);
5774 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5775}
5776
5777#ifdef IEM_WITH_SETJMP
5778/** \#GP(sel) - 0d, longjmp. */
5779DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5780{
5781 NOREF(Sel);
5782 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5783}
5784#endif
5785
5786
5787/** \#GP(sel) - 0d. */
5788DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5789{
5790 NOREF(iSegReg); NOREF(fAccess);
5791 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5792}
5793
5794#ifdef IEM_WITH_SETJMP
5795/** \#GP(sel) - 0d, longjmp. */
5796DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5797 uint32_t fAccess)
5798{
5799 NOREF(iSegReg); NOREF(fAccess);
5800 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5801}
5802#endif
5803
5804
5805/** \#PF(n) - 0e. */
5806DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5807{
5808 uint16_t uErr;
5809 switch (rc)
5810 {
5811 case VERR_PAGE_NOT_PRESENT:
5812 case VERR_PAGE_TABLE_NOT_PRESENT:
5813 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5814 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5815 uErr = 0;
5816 break;
5817
5818 default:
5819 AssertMsgFailed(("%Rrc\n", rc));
5820 RT_FALL_THRU();
5821 case VERR_ACCESS_DENIED:
5822 uErr = X86_TRAP_PF_P;
5823 break;
5824
5825 /** @todo reserved */
5826 }
5827
5828 if (pVCpu->iem.s.uCpl == 3)
5829 uErr |= X86_TRAP_PF_US;
5830
5831 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5832 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5833 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5834 uErr |= X86_TRAP_PF_ID;
5835
5836#if 0 /* This is so much non-sense, really. Why was it done like that? */
5837 /* Note! RW access callers reporting a WRITE protection fault, will clear
5838 the READ flag before calling. So, read-modify-write accesses (RW)
5839 can safely be reported as READ faults. */
5840 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5841 uErr |= X86_TRAP_PF_RW;
5842#else
5843 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5844 {
5845 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5846 uErr |= X86_TRAP_PF_RW;
5847 }
5848#endif
5849
5850 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5851 uErr, GCPtrWhere);
5852}
5853
5854#ifdef IEM_WITH_SETJMP
5855/** \#PF(n) - 0e, longjmp. */
5856IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5857{
5858 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5859}
5860#endif
5861
5862
5863/** \#MF(0) - 10. */
5864DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5865{
5866 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5867}
5868
5869
5870/** \#AC(0) - 11. */
5871DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5872{
5873 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5874}
5875
5876
5877/**
5878 * Macro for calling iemCImplRaiseDivideError().
5879 *
5880 * This enables us to add/remove arguments and force different levels of
5881 * inlining as we wish.
5882 *
5883 * @return Strict VBox status code.
5884 */
5885#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
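/* Illustrative use from an opcode decoder (hypothetical example):
       return IEMOP_RAISE_DIVIDE_ERROR();
   which defers execution to iemCImplRaiseDivideError() via IEM_MC_DEFER_TO_CIMPL_0. */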
5886IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5887{
5888 NOREF(cbInstr);
5889 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5890}
5891
5892
5893/**
5894 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5895 *
5896 * This enables us to add/remove arguments and force different levels of
5897 * inlining as we wish.
5898 *
5899 * @return Strict VBox status code.
5900 */
5901#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5902IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5903{
5904 NOREF(cbInstr);
5905 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5906}
5907
5908
5909/**
5910 * Macro for calling iemCImplRaiseInvalidOpcode().
5911 *
5912 * This enables us to add/remove arguments and force different levels of
5913 * inlining as we wish.
5914 *
5915 * @return Strict VBox status code.
5916 */
5917#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5918IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5919{
5920 NOREF(cbInstr);
5921 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5922}
5923
5924
5925/** @} */
5926
5927
5928/*
5929 *
5930 * Helper routines.
5931 * Helper routines.
5932 * Helper routines.
5933 *
5934 */
5935
5936/**
5937 * Recalculates the effective operand size.
5938 *
5939 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5940 */
5941IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5942{
5943 switch (pVCpu->iem.s.enmCpuMode)
5944 {
5945 case IEMMODE_16BIT:
5946 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5947 break;
5948 case IEMMODE_32BIT:
5949 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5950 break;
5951 case IEMMODE_64BIT:
5952 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5953 {
5954 case 0:
5955 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5956 break;
5957 case IEM_OP_PRF_SIZE_OP:
5958 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5959 break;
5960 case IEM_OP_PRF_SIZE_REX_W:
5961 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5962 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5963 break;
5964 }
5965 break;
5966 default:
5967 AssertFailed();
5968 }
5969}
5970
5971
5972/**
5973 * Sets the default operand size to 64-bit and recalculates the effective
5974 * operand size.
5975 *
5976 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5977 */
5978IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5979{
5980 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5981 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
5982 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5983 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5984 else
5985 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5986}
5987
5988
5989/*
5990 *
5991 * Common opcode decoders.
5992 * Common opcode decoders.
5993 * Common opcode decoders.
5994 *
5995 */
5996//#include <iprt/mem.h>
5997
5998/**
5999 * Used to add extra details about a stub case.
6000 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6001 */
6002IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6003{
6004#if defined(LOG_ENABLED) && defined(IN_RING3)
6005 PVM pVM = pVCpu->CTX_SUFF(pVM);
6006 char szRegs[4096];
6007 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6008 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6009 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6010 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6011 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6012 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6013 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6014 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6015 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6016 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6017 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6018 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6019 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6020 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6021 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6022 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6023 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6024 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6025 " efer=%016VR{efer}\n"
6026 " pat=%016VR{pat}\n"
6027 " sf_mask=%016VR{sf_mask}\n"
6028 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6029 " lstar=%016VR{lstar}\n"
6030 " star=%016VR{star} cstar=%016VR{cstar}\n"
6031 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6032 );
6033
6034 char szInstr[256];
6035 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6036 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6037 szInstr, sizeof(szInstr), NULL);
6038
6039 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6040#else
6041 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs, IEM_GET_CTX(pVCpu)->rip);
6042#endif
6043}
6044
6045/**
6046 * Complains about a stub.
6047 *
6048 * Providing two versions of this macro, one for daily use and one for use when
6049 * working on IEM.
6050 */
6051#if 0
6052# define IEMOP_BITCH_ABOUT_STUB() \
6053 do { \
6054 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6055 iemOpStubMsg2(pVCpu); \
6056 RTAssertPanic(); \
6057 } while (0)
6058#else
6059# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6060#endif
6061
6062/** Stubs an opcode. */
6063#define FNIEMOP_STUB(a_Name) \
6064 FNIEMOP_DEF(a_Name) \
6065 { \
6066 RT_NOREF_PV(pVCpu); \
6067 IEMOP_BITCH_ABOUT_STUB(); \
6068 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6069 } \
6070 typedef int ignore_semicolon
6071
6072/** Stubs an opcode. */
6073#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6074 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6075 { \
6076 RT_NOREF_PV(pVCpu); \
6077 RT_NOREF_PV(a_Name0); \
6078 IEMOP_BITCH_ABOUT_STUB(); \
6079 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6080 } \
6081 typedef int ignore_semicolon
6082
6083/** Stubs an opcode which currently should raise \#UD. */
6084#define FNIEMOP_UD_STUB(a_Name) \
6085 FNIEMOP_DEF(a_Name) \
6086 { \
6087 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6088 return IEMOP_RAISE_INVALID_OPCODE(); \
6089 } \
6090 typedef int ignore_semicolon
6091
6092/** Stubs an opcode which currently should raise \#UD. */
6093#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6094 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6095 { \
6096 RT_NOREF_PV(pVCpu); \
6097 RT_NOREF_PV(a_Name0); \
6098 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6099 return IEMOP_RAISE_INVALID_OPCODE(); \
6100 } \
6101 typedef int ignore_semicolon
6102
6103
6104
6105/** @name Register Access.
6106 * @{
6107 */
6108
6109/**
6110 * Gets a reference (pointer) to the specified hidden segment register.
6111 *
6112 * @returns Hidden register reference.
6113 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6114 * @param iSegReg The segment register.
6115 */
6116IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6117{
6118 Assert(iSegReg < X86_SREG_COUNT);
6119 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6120 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
6121
6122#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6123 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6124 { /* likely */ }
6125 else
6126 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6127#else
6128 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6129#endif
6130 return pSReg;
6131}
6132
6133
6134/**
6135 * Ensures that the given hidden segment register is up to date.
6136 *
6137 * @returns Hidden register reference.
6138 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6139 * @param pSReg The segment register.
6140 */
6141IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6142{
6143#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6144 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6145 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6146#else
6147 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6148 NOREF(pVCpu);
6149#endif
6150 return pSReg;
6151}
6152
6153
6154/**
6155 * Gets a reference (pointer) to the specified segment register (the selector
6156 * value).
6157 *
6158 * @returns Pointer to the selector variable.
6159 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6160 * @param iSegReg The segment register.
6161 */
6162DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6163{
6164 Assert(iSegReg < X86_SREG_COUNT);
6165 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6166 return &pCtx->aSRegs[iSegReg].Sel;
6167}
6168
6169
6170/**
6171 * Fetches the selector value of a segment register.
6172 *
6173 * @returns The selector value.
6174 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6175 * @param iSegReg The segment register.
6176 */
6177DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6178{
6179 Assert(iSegReg < X86_SREG_COUNT);
6180 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
6181}
6182
6183
6184/**
6185 * Fetches the base address value of a segment register.
6186 *
6187 * @returns The segment base address.
6188 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6189 * @param iSegReg The segment register.
6190 */
6191DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6192{
6193 Assert(iSegReg < X86_SREG_COUNT);
6194 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].u64Base;
6195}
6196
6197
6198/**
6199 * Gets a reference (pointer) to the specified general purpose register.
6200 *
6201 * @returns Register reference.
6202 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6203 * @param iReg The general purpose register.
6204 */
6205DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6206{
6207 Assert(iReg < 16);
6208 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6209 return &pCtx->aGRegs[iReg];
6210}
6211
6212
6213/**
6214 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6215 *
6216 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6217 *
6218 * @returns Register reference.
6219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6220 * @param iReg The register.
6221 */
6222DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6223{
6224 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6225 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6226 {
6227 Assert(iReg < 16);
6228 return &pCtx->aGRegs[iReg].u8;
6229 }
6230 /* high 8-bit register. */
6231 Assert(iReg < 8);
6232 return &pCtx->aGRegs[iReg & 3].bHi;
6233}
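/*
 * A standalone sketch of the 8-bit register mapping implemented above, in
 * plain C with illustrative names only (ExampleGReg8Name is not an IEM
 * symbol).  The architectural rule: without a REX prefix, encodings 4..7
 * select the high bytes AH/CH/DH/BH; with any REX prefix they select
 * SPL/BPL/SIL/DIL instead.
 *
 *     static const char *ExampleGReg8Name(unsigned iReg, bool fHasRex)
 *     {
 *         static const char * const s_apszLow[16] =
 *         {
 *             "al",  "cl",  "dl",   "bl",   "spl",  "bpl",  "sil",  "dil",
 *             "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b"
 *         };
 *         static const char * const s_apszHigh[4] = { "ah", "ch", "dh", "bh" };
 *         return iReg < 4 || fHasRex ? s_apszLow[iReg] : s_apszHigh[iReg & 3];
 *     }
 */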
6234
6235
6236/**
6237 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6238 *
6239 * @returns Register reference.
6240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6241 * @param iReg The register.
6242 */
6243DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6244{
6245 Assert(iReg < 16);
6246 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6247 return &pCtx->aGRegs[iReg].u16;
6248}
6249
6250
6251/**
6252 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6253 *
6254 * @returns Register reference.
6255 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6256 * @param iReg The register.
6257 */
6258DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6259{
6260 Assert(iReg < 16);
6261 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6262 return &pCtx->aGRegs[iReg].u32;
6263}
6264
6265
6266/**
6267 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6268 *
6269 * @returns Register reference.
6270 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6271 * @param iReg The register.
6272 */
6273DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6274{
6275 Assert(iReg < 16);
6276 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6277 return &pCtx->aGRegs[iReg].u64;
6278}
6279
6280
6281/**
6282 * Gets a reference (pointer) to the specified segment register's base address.
6283 *
6284 * @returns Segment register base address reference.
6285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6286 * @param iSegReg The segment selector.
6287 */
6288DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6289{
6290 Assert(iSegReg < X86_SREG_COUNT);
6291 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6292 return &pCtx->aSRegs[iSegReg].u64Base;
6293}
6294
6295
6296/**
6297 * Fetches the value of an 8-bit general purpose register.
6298 *
6299 * @returns The register value.
6300 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6301 * @param iReg The register.
6302 */
6303DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6304{
6305 return *iemGRegRefU8(pVCpu, iReg);
6306}
6307
6308
6309/**
6310 * Fetches the value of a 16-bit general purpose register.
6311 *
6312 * @returns The register value.
6313 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6314 * @param iReg The register.
6315 */
6316DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6317{
6318 Assert(iReg < 16);
6319 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
6320}
6321
6322
6323/**
6324 * Fetches the value of a 32-bit general purpose register.
6325 *
6326 * @returns The register value.
6327 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6328 * @param iReg The register.
6329 */
6330DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6331{
6332 Assert(iReg < 16);
6333 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
6334}
6335
6336
6337/**
6338 * Fetches the value of a 64-bit general purpose register.
6339 *
6340 * @returns The register value.
6341 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6342 * @param iReg The register.
6343 */
6344DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6345{
6346 Assert(iReg < 16);
6347 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
6348}
6349
6350
6351/**
6352 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6353 *
6354 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6355 * segment limit.
6356 * @returns Strict VBox status code.
6357 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6358 * @param offNextInstr The offset of the next instruction.
6359 */
6360IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6361{
6362 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6363 switch (pVCpu->iem.s.enmEffOpSize)
6364 {
6365 case IEMMODE_16BIT:
6366 {
6367 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6368 if ( uNewIp > pCtx->cs.u32Limit
6369 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6370 return iemRaiseGeneralProtectionFault0(pVCpu);
6371 pCtx->rip = uNewIp;
6372 break;
6373 }
6374
6375 case IEMMODE_32BIT:
6376 {
6377 Assert(pCtx->rip <= UINT32_MAX);
6378 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6379
6380 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6381 if (uNewEip > pCtx->cs.u32Limit)
6382 return iemRaiseGeneralProtectionFault0(pVCpu);
6383 pCtx->rip = uNewEip;
6384 break;
6385 }
6386
6387 case IEMMODE_64BIT:
6388 {
6389 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6390
6391 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6392 if (!IEM_IS_CANONICAL(uNewRip))
6393 return iemRaiseGeneralProtectionFault0(pVCpu);
6394 pCtx->rip = uNewRip;
6395 break;
6396 }
6397
6398 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6399 }
6400
6401 pCtx->eflags.Bits.u1RF = 0;
6402
6403#ifndef IEM_WITH_CODE_TLB
6404 /* Flush the prefetch buffer. */
6405 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6406#endif
6407
6408 return VINF_SUCCESS;
6409}
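/*
 * Worked example of the relative jump arithmetic above (16-bit case), plain C
 * with illustrative values.  The offset is applied relative to the end of the
 * current instruction, so the two byte "jmp short $" (EB FE) jumps to itself:
 *
 *     uint16_t ip      = 0x0100;                              // current IP
 *     uint8_t  cbInstr = 2;                                   // EB FE
 *     int8_t   offNext = -2;
 *     uint16_t uNewIp  = (uint16_t)(ip + cbInstr + offNext);  // == 0x0100
 *     // uNewIp is then checked against CS.u32Limit before being committed.
 */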
6410
6411
6412/**
6413 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6414 *
6415 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6416 * segment limit.
6417 *
6418 * @returns Strict VBox status code.
6419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6420 * @param offNextInstr The offset of the next instruction.
6421 */
6422IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6423{
6424 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6425 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6426
6427 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6428 if ( uNewIp > pCtx->cs.u32Limit
6429 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6430 return iemRaiseGeneralProtectionFault0(pVCpu);
6431 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6432 pCtx->rip = uNewIp;
6433 pCtx->eflags.Bits.u1RF = 0;
6434
6435#ifndef IEM_WITH_CODE_TLB
6436 /* Flush the prefetch buffer. */
6437 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6438#endif
6439
6440 return VINF_SUCCESS;
6441}
6442
6443
6444/**
6445 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6446 *
6447 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6448 * segment limit.
6449 *
6450 * @returns Strict VBox status code.
6451 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6452 * @param offNextInstr The offset of the next instruction.
6453 */
6454IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6455{
6456 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6457 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6458
6459 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6460 {
6461 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6462
6463 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6464 if (uNewEip > pCtx->cs.u32Limit)
6465 return iemRaiseGeneralProtectionFault0(pVCpu);
6466 pCtx->rip = uNewEip;
6467 }
6468 else
6469 {
6470 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6471
6472 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6473 if (!IEM_IS_CANONICAL(uNewRip))
6474 return iemRaiseGeneralProtectionFault0(pVCpu);
6475 pCtx->rip = uNewRip;
6476 }
6477 pCtx->eflags.Bits.u1RF = 0;
6478
6479#ifndef IEM_WITH_CODE_TLB
6480 /* Flush the prefetch buffer. */
6481 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6482#endif
6483
6484 return VINF_SUCCESS;
6485}
6486
6487
6488/**
6489 * Performs a near jump to the specified address.
6490 *
6491 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6492 * segment limit.
6493 *
6494 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6495 * @param uNewRip The new RIP value.
6496 */
6497IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6498{
6499 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6500 switch (pVCpu->iem.s.enmEffOpSize)
6501 {
6502 case IEMMODE_16BIT:
6503 {
6504 Assert(uNewRip <= UINT16_MAX);
6505 if ( uNewRip > pCtx->cs.u32Limit
6506 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6507 return iemRaiseGeneralProtectionFault0(pVCpu);
6508 /** @todo Test 16-bit jump in 64-bit mode. */
6509 pCtx->rip = uNewRip;
6510 break;
6511 }
6512
6513 case IEMMODE_32BIT:
6514 {
6515 Assert(uNewRip <= UINT32_MAX);
6516 Assert(pCtx->rip <= UINT32_MAX);
6517 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6518
6519 if (uNewRip > pCtx->cs.u32Limit)
6520 return iemRaiseGeneralProtectionFault0(pVCpu);
6521 pCtx->rip = uNewRip;
6522 break;
6523 }
6524
6525 case IEMMODE_64BIT:
6526 {
6527 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6528
6529 if (!IEM_IS_CANONICAL(uNewRip))
6530 return iemRaiseGeneralProtectionFault0(pVCpu);
6531 pCtx->rip = uNewRip;
6532 break;
6533 }
6534
6535 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6536 }
6537
6538 pCtx->eflags.Bits.u1RF = 0;
6539
6540#ifndef IEM_WITH_CODE_TLB
6541 /* Flush the prefetch buffer. */
6542 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6543#endif
6544
6545 return VINF_SUCCESS;
6546}
6547
6548
6549/**
6550 * Get the address of the top of the stack.
6551 *
6552 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6553 * @param pCtx The CPU context from which SP/ESP/RSP should be
6554 * read.
6555 */
6556DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6557{
6558 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6559 return pCtx->rsp;
6560 if (pCtx->ss.Attr.n.u1DefBig)
6561 return pCtx->esp;
6562 return pCtx->sp;
6563}
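/*
 * Minimal sketch of the stack width selection above, assuming only the
 * architectural rule that 64-bit mode always uses RSP, a big (D/B=1) SS uses
 * ESP and a 16-bit SS uses SP.  The function name is illustrative:
 *
 *     uint64_t ExampleEffRsp(uint64_t uRsp, bool f64BitMode, bool fSsBig)
 *     {
 *         if (f64BitMode) return uRsp;
 *         if (fSsBig)     return (uint32_t)uRsp;
 *         return (uint16_t)uRsp;
 *     }
 */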
6564
6565
6566/**
6567 * Updates the RIP/EIP/IP to point to the next instruction.
6568 *
6569 * This function leaves the EFLAGS.RF flag alone.
6570 *
6571 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6572 * @param cbInstr The number of bytes to add.
6573 */
6574IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6575{
6576 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6577 switch (pVCpu->iem.s.enmCpuMode)
6578 {
6579 case IEMMODE_16BIT:
6580 Assert(pCtx->rip <= UINT16_MAX);
6581 pCtx->eip += cbInstr;
6582 pCtx->eip &= UINT32_C(0xffff);
6583 break;
6584
6585 case IEMMODE_32BIT:
6586 pCtx->eip += cbInstr;
6587 Assert(pCtx->rip <= UINT32_MAX);
6588 break;
6589
6590 case IEMMODE_64BIT:
6591 pCtx->rip += cbInstr;
6592 break;
6593 default: AssertFailed();
6594 }
6595}
6596
6597
6598#if 0
6599/**
6600 * Updates the RIP/EIP/IP to point to the next instruction.
6601 *
6602 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6603 */
6604IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6605{
6606 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6607}
6608#endif
6609
6610
6611
6612/**
6613 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6614 *
6615 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6616 * @param cbInstr The number of bytes to add.
6617 */
6618IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6619{
6620 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6621
6622 pCtx->eflags.Bits.u1RF = 0;
6623
6624 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6625#if ARCH_BITS >= 64
6626 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6627 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6628 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6629#else
6630 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6631 pCtx->rip += cbInstr;
6632 else
6633 pCtx->eip += cbInstr;
6634#endif
6635}
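/*
 * Standalone sketch of the branch-free advance used above on 64-bit hosts,
 * plain C with the enum values mirroring IEMMODE_16BIT=0, IEMMODE_32BIT=1 and
 * IEMMODE_64BIT=2 (as checked by the AssertCompile):
 *
 *     static const uint64_t s_aMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
 *     uNewRip = (uRip + cbInstr) & s_aMasks[enmMode];
 *
 * The table lookup replaces an if/else chain on the CPU mode; 16-bit and
 * 32-bit modes share the 32-bit mask since RIP is asserted to fit in 32 bits
 * in those modes.
 */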
6636
6637
6638/**
6639 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6640 *
6641 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6642 */
6643IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6644{
6645 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6646}
6647
6648
6649/**
6650 * Adds to the stack pointer.
6651 *
6652 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6653 * @param pCtx The CPU context in which SP/ESP/RSP should be
6654 * updated.
6655 * @param cbToAdd The number of bytes to add (8-bit!).
6656 */
6657DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6658{
6659 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6660 pCtx->rsp += cbToAdd;
6661 else if (pCtx->ss.Attr.n.u1DefBig)
6662 pCtx->esp += cbToAdd;
6663 else
6664 pCtx->sp += cbToAdd;
6665}
6666
6667
6668/**
6669 * Subtracts from the stack pointer.
6670 *
6671 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6672 * @param pCtx The CPU context in which SP/ESP/RSP should be
6673 * updated.
6674 * @param cbToSub The number of bytes to subtract (8-bit!).
6675 */
6676DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6677{
6678 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6679 pCtx->rsp -= cbToSub;
6680 else if (pCtx->ss.Attr.n.u1DefBig)
6681 pCtx->esp -= cbToSub;
6682 else
6683 pCtx->sp -= cbToSub;
6684}
6685
6686
6687/**
6688 * Adds to the temporary stack pointer.
6689 *
6690 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6691 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6692 * @param cbToAdd The number of bytes to add (16-bit).
6693 * @param pCtx Where to get the current stack mode.
6694 */
6695DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6696{
6697 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6698 pTmpRsp->u += cbToAdd;
6699 else if (pCtx->ss.Attr.n.u1DefBig)
6700 pTmpRsp->DWords.dw0 += cbToAdd;
6701 else
6702 pTmpRsp->Words.w0 += cbToAdd;
6703}
6704
6705
6706/**
6707 * Subtracts from the temporary stack pointer.
6708 *
6709 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6710 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6711 * @param cbToSub The number of bytes to subtract.
6712 * @param pCtx Where to get the current stack mode.
6713 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6714 * expecting that.
6715 */
6716DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6717{
6718 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6719 pTmpRsp->u -= cbToSub;
6720 else if (pCtx->ss.Attr.n.u1DefBig)
6721 pTmpRsp->DWords.dw0 -= cbToSub;
6722 else
6723 pTmpRsp->Words.w0 -= cbToSub;
6724}
6725
6726
6727/**
6728 * Calculates the effective stack address for a push of the specified size as
6729 * well as the new RSP value (upper bits may be masked).
6730 *
6731 * @returns Effective stack address for the push.
6732 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6733 * @param pCtx Where to get the current stack mode.
6734 * @param cbItem The size of the stack item to push.
6735 * @param puNewRsp Where to return the new RSP value.
6736 */
6737DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6738{
6739 RTUINT64U uTmpRsp;
6740 RTGCPTR GCPtrTop;
6741 uTmpRsp.u = pCtx->rsp;
6742
6743 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6744 GCPtrTop = uTmpRsp.u -= cbItem;
6745 else if (pCtx->ss.Attr.n.u1DefBig)
6746 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6747 else
6748 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6749 *puNewRsp = uTmpRsp.u;
6750 return GCPtrTop;
6751}
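/*
 * Worked example of the push address calculation above for a 16-bit stack,
 * showing how only the low word wraps while the upper RSP bits are preserved
 * (illustrative values, RTUINT64U as used above):
 *
 *     RTUINT64U uTmpRsp;
 *     uTmpRsp.u = UINT64_C(0x0000000000010000);    // SS.B=0, SP=0x0000
 *     uint8_t   cbItem   = 2;                      // pushing a word
 *     uint16_t  GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
 *     // GCPtrTop == 0xFFFE and uTmpRsp.u == 0x000000000001FFFE
 */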
6752
6753
6754/**
6755 * Gets the current stack pointer and calculates the value after a pop of the
6756 * specified size.
6757 *
6758 * @returns Current stack pointer.
6759 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6760 * @param pCtx Where to get the current stack mode.
6761 * @param cbItem The size of the stack item to pop.
6762 * @param puNewRsp Where to return the new RSP value.
6763 */
6764DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6765{
6766 RTUINT64U uTmpRsp;
6767 RTGCPTR GCPtrTop;
6768 uTmpRsp.u = pCtx->rsp;
6769
6770 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6771 {
6772 GCPtrTop = uTmpRsp.u;
6773 uTmpRsp.u += cbItem;
6774 }
6775 else if (pCtx->ss.Attr.n.u1DefBig)
6776 {
6777 GCPtrTop = uTmpRsp.DWords.dw0;
6778 uTmpRsp.DWords.dw0 += cbItem;
6779 }
6780 else
6781 {
6782 GCPtrTop = uTmpRsp.Words.w0;
6783 uTmpRsp.Words.w0 += cbItem;
6784 }
6785 *puNewRsp = uTmpRsp.u;
6786 return GCPtrTop;
6787}
6788
6789
6790/**
6791 * Calculates the effective stack address for a push of the specified size as
6792 * well as the new temporary RSP value (upper bits may be masked).
6793 *
6794 * @returns Effective stack address for the push.
6795 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6796 * @param pCtx Where to get the current stack mode.
6797 * @param pTmpRsp The temporary stack pointer. This is updated.
6798 * @param cbItem The size of the stack item to push.
6799 */
6800DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6801{
6802 RTGCPTR GCPtrTop;
6803
6804 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6805 GCPtrTop = pTmpRsp->u -= cbItem;
6806 else if (pCtx->ss.Attr.n.u1DefBig)
6807 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6808 else
6809 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6810 return GCPtrTop;
6811}
6812
6813
6814/**
6815 * Gets the effective stack address for a pop of the specified size and
6816 * calculates and updates the temporary RSP.
6817 *
6818 * @returns Current stack pointer.
6819 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6820 * @param pCtx Where to get the current stack mode.
6821 * @param pTmpRsp The temporary stack pointer. This is updated.
6822 * @param cbItem The size of the stack item to pop.
6823 */
6824DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6825{
6826 RTGCPTR GCPtrTop;
6827 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6828 {
6829 GCPtrTop = pTmpRsp->u;
6830 pTmpRsp->u += cbItem;
6831 }
6832 else if (pCtx->ss.Attr.n.u1DefBig)
6833 {
6834 GCPtrTop = pTmpRsp->DWords.dw0;
6835 pTmpRsp->DWords.dw0 += cbItem;
6836 }
6837 else
6838 {
6839 GCPtrTop = pTmpRsp->Words.w0;
6840 pTmpRsp->Words.w0 += cbItem;
6841 }
6842 return GCPtrTop;
6843}
6844
6845/** @} */
6846
6847
6848/** @name FPU access and helpers.
6849 *
6850 * @{
6851 */
6852
6853
6854/**
6855 * Hook for preparing to use the host FPU.
6856 *
6857 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6858 *
6859 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6860 */
6861DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6862{
6863#ifdef IN_RING3
6864 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6865#else
6866 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6867#endif
6868}
6869
6870
6871/**
6872 * Hook for preparing to use the host FPU for SSE.
6873 *
6874 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6875 *
6876 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6877 */
6878DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6879{
6880 iemFpuPrepareUsage(pVCpu);
6881}
6882
6883
6884/**
6885 * Hook for preparing to use the host FPU for AVX.
6886 *
6887 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6888 *
6889 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6890 */
6891DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6892{
6893 iemFpuPrepareUsage(pVCpu);
6894}
6895
6896
6897/**
6898 * Hook for actualizing the guest FPU state before the interpreter reads it.
6899 *
6900 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6901 *
6902 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6903 */
6904DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6905{
6906#ifdef IN_RING3
6907 NOREF(pVCpu);
6908#else
6909 CPUMRZFpuStateActualizeForRead(pVCpu);
6910#endif
6911}
6912
6913
6914/**
6915 * Hook for actualizing the guest FPU state before the interpreter changes it.
6916 *
6917 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6918 *
6919 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6920 */
6921DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6922{
6923#ifdef IN_RING3
6924 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6925#else
6926 CPUMRZFpuStateActualizeForChange(pVCpu);
6927#endif
6928}
6929
6930
6931/**
6932 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6933 * only.
6934 *
6935 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6936 *
6937 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6938 */
6939DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6940{
6941#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6942 NOREF(pVCpu);
6943#else
6944 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6945#endif
6946}
6947
6948
6949/**
6950 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6951 * read+write.
6952 *
6953 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6954 *
6955 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6956 */
6957DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6958{
6959#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6960 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6961#else
6962 CPUMRZFpuStateActualizeForChange(pVCpu);
6963#endif
6964}
6965
6966
6967/**
6968 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
6969 * only.
6970 *
6971 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6972 *
6973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6974 */
6975DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
6976{
6977#ifdef IN_RING3
6978 NOREF(pVCpu);
6979#else
6980 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
6981#endif
6982}
6983
6984
6985/**
6986 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
6987 * read+write.
6988 *
6989 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6990 *
6991 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6992 */
6993DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
6994{
6995#ifdef IN_RING3
6996 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6997#else
6998 CPUMRZFpuStateActualizeForChange(pVCpu);
6999#endif
7000}
7001
7002
7003/**
7004 * Stores a QNaN value into a FPU register.
7005 *
7006 * @param pReg Pointer to the register.
7007 */
7008DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7009{
7010 pReg->au32[0] = UINT32_C(0x00000000);
7011 pReg->au32[1] = UINT32_C(0xc0000000);
7012 pReg->au16[4] = UINT16_C(0xffff);
7013}
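/*
 * The pattern stored above is the x87 "real indefinite" QNaN: sign = 1,
 * exponent = 0x7FFF (all ones) and mantissa = 0xC000000000000000 (integer bit
 * plus the top fraction bit), i.e. FFFF'C0000000'00000000 as an 80-bit value.
 * Decoded through the RTFLOAT80U fields used elsewhere in this file:
 *
 *     //  au16[4] = 0xffff      ->  sign bit set, s.uExponent == 0x7fff
 *     //  au32[1] = 0xc0000000  \
 *     //  au32[0] = 0x00000000  /   s.u64Mantissa == UINT64_C(0xc000000000000000)
 */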
7014
7015
7016/**
7017 * Updates the FOP, FPU.CS and FPUIP registers.
7018 *
7019 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7020 * @param pCtx The CPU context.
7021 * @param pFpuCtx The FPU context.
7022 */
7023DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
7024{
7025 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7026 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7027 /** @todo x87.CS and FPUIP need to be kept separately. */
7028 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7029 {
7030 /** @todo Testcase: the assumptions made here about how FPUIP and FPUDP are
7031 * handled in real mode are based on the fnsave and fnstenv images. */
7032 pFpuCtx->CS = 0;
7033 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
7034 }
7035 else
7036 {
7037 pFpuCtx->CS = pCtx->cs.Sel;
7038 pFpuCtx->FPUIP = pCtx->rip;
7039 }
7040}
7041
7042
7043/**
7044 * Updates the x87.DS and FPUDP registers.
7045 *
7046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7047 * @param pCtx The CPU context.
7048 * @param pFpuCtx The FPU context.
7049 * @param iEffSeg The effective segment register.
7050 * @param GCPtrEff The effective address relative to @a iEffSeg.
7051 */
7052DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7053{
7054 RTSEL sel;
7055 switch (iEffSeg)
7056 {
7057 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
7058 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
7059 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
7060 case X86_SREG_ES: sel = pCtx->es.Sel; break;
7061 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
7062 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
7063 default:
7064 AssertMsgFailed(("%d\n", iEffSeg));
7065 sel = pCtx->ds.Sel;
7066 }
7067 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7068 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7069 {
7070 pFpuCtx->DS = 0;
7071 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7072 }
7073 else
7074 {
7075 pFpuCtx->DS = sel;
7076 pFpuCtx->FPUDP = GCPtrEff;
7077 }
7078}
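/*
 * Worked example of the real/V86 mode FPUDP encoding above, where the 16-bit
 * selector is folded into the address instead of being stored in the DS field
 * (illustrative values):
 *
 *     uint16_t sel    = 0x1234;                         // data selector
 *     uint32_t offEff = 0x0010;                         // effective offset
 *     uint32_t uFpuDp = offEff + ((uint32_t)sel << 4);  // == 0x00012350
 *     // In protected mode the selector goes into x87.DS and FPUDP holds offEff.
 */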
7079
7080
7081/**
7082 * Rotates the stack registers in the push direction.
7083 *
7084 * @param pFpuCtx The FPU context.
7085 * @remarks This is a complete waste of time, but fxsave stores the registers in
7086 * stack order.
7087 */
7088DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7089{
7090 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7091 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7092 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7093 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7094 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7095 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7096 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7097 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7098 pFpuCtx->aRegs[0].r80 = r80Tmp;
7099}
7100
7101
7102/**
7103 * Rotates the stack registers in the pop direction.
7104 *
7105 * @param pFpuCtx The FPU context.
7106 * @remarks This is a complete waste of time, but fxsave stores the registers in
7107 * stack order.
7108 */
7109DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7110{
7111 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7112 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7113 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7114 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7115 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7116 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7117 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7118 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7119 pFpuCtx->aRegs[7].r80 = r80Tmp;
7120}
7121
7122
7123/**
7124 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7125 * exception prevents it.
7126 *
7127 * @param pResult The FPU operation result to push.
7128 * @param pFpuCtx The FPU context.
7129 */
7130IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7131{
7132 /* Update FSW and bail if there are pending exceptions afterwards. */
7133 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7134 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7135 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7136 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7137 {
7138 pFpuCtx->FSW = fFsw;
7139 return;
7140 }
7141
7142 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7143 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7144 {
7145 /* All is fine, push the actual value. */
7146 pFpuCtx->FTW |= RT_BIT(iNewTop);
7147 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7148 }
7149 else if (pFpuCtx->FCW & X86_FCW_IM)
7150 {
7151 /* Masked stack overflow, push QNaN. */
7152 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7153 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7154 }
7155 else
7156 {
7157 /* Raise stack overflow, don't push anything. */
7158 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7159 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7160 return;
7161 }
7162
7163 fFsw &= ~X86_FSW_TOP_MASK;
7164 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7165 pFpuCtx->FSW = fFsw;
7166
7167 iemFpuRotateStackPush(pFpuCtx);
7168}
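/*
 * The "(TOP + 7) & 7" step above computes TOP - 1 modulo 8 without branching,
 * i.e. the stack slot a push will occupy (TOP being the 3-bit top-of-stack
 * field in FSW).  A standalone sketch:
 *
 *     unsigned uTop    = 0;
 *     unsigned iNewTop = (uTop + 7) & 7;   // == 7, the slot the new ST(0) lands in
 *     // With uTop == 3 the result would be 2, and so on.
 */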
7169
7170
7171/**
7172 * Stores a result in a FPU register and updates the FSW and FTW.
7173 *
7174 * @param pFpuCtx The FPU context.
7175 * @param pResult The result to store.
7176 * @param iStReg Which FPU register to store it in.
7177 */
7178IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7179{
7180 Assert(iStReg < 8);
7181 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7182 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7183 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7184 pFpuCtx->FTW |= RT_BIT(iReg);
7185 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7186}
7187
7188
7189/**
7190 * Only updates the FPU status word (FSW) with the result of the current
7191 * instruction.
7192 *
7193 * @param pFpuCtx The FPU context.
7194 * @param u16FSW The FSW output of the current instruction.
7195 */
7196IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7197{
7198 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7199 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7200}
7201
7202
7203/**
7204 * Pops one item off the FPU stack if no pending exception prevents it.
7205 *
7206 * @param pFpuCtx The FPU context.
7207 */
7208IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7209{
7210 /* Check pending exceptions. */
7211 uint16_t uFSW = pFpuCtx->FSW;
7212 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7213 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7214 return;
7215
7216 /* TOP++. */
7217 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7218 uFSW &= ~X86_FSW_TOP_MASK;
7219 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7220 pFpuCtx->FSW = uFSW;
7221
7222 /* Mark the previous ST0 as empty. */
7223 iOldTop >>= X86_FSW_TOP_SHIFT;
7224 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7225
7226 /* Rotate the registers. */
7227 iemFpuRotateStackPop(pFpuCtx);
7228}
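/*
 * The "+ 9 << shift" above increments the 3-bit TOP field in place: 9 is
 * congruent to 1 modulo 8, and working in shifted units avoids extracting and
 * re-inserting the field.  Plain C sketch, with TOP at bits 11..13 of FSW:
 *
 *     uint16_t uFsw = 0x3800;                                  // TOP = 7
 *     uint16_t uTop = uFsw & UINT16_C(0x3800);                 // still shifted
 *     uFsw = (uFsw & ~UINT16_C(0x3800)) | ((uTop + (UINT16_C(9) << 11)) & UINT16_C(0x3800));
 *     // TOP is now 0 (7 + 1 wrapped); the other FSW bits are untouched.
 */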
7229
7230
7231/**
7232 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7233 *
7234 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7235 * @param pResult The FPU operation result to push.
7236 */
7237IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7238{
7239 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7240 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7241 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7242 iemFpuMaybePushResult(pResult, pFpuCtx);
7243}
7244
7245
7246/**
7247 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7248 * and sets FPUDP and FPUDS.
7249 *
7250 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7251 * @param pResult The FPU operation result to push.
7252 * @param iEffSeg The effective segment register.
7253 * @param GCPtrEff The effective address relative to @a iEffSeg.
7254 */
7255IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7256{
7257 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7258 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7259 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7260 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7261 iemFpuMaybePushResult(pResult, pFpuCtx);
7262}
7263
7264
7265/**
7266 * Replace ST0 with the first value and push the second onto the FPU stack,
7267 * unless a pending exception prevents it.
7268 *
7269 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7270 * @param pResult The FPU operation result to store and push.
7271 */
7272IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7273{
7274 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7275 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7276 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7277
7278 /* Update FSW and bail if there are pending exceptions afterwards. */
7279 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7280 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7281 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7282 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7283 {
7284 pFpuCtx->FSW = fFsw;
7285 return;
7286 }
7287
7288 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7289 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7290 {
7291 /* All is fine, push the actual value. */
7292 pFpuCtx->FTW |= RT_BIT(iNewTop);
7293 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7294 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7295 }
7296 else if (pFpuCtx->FCW & X86_FCW_IM)
7297 {
7298 /* Masked stack overflow, push QNaN. */
7299 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7300 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7301 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7302 }
7303 else
7304 {
7305 /* Raise stack overflow, don't push anything. */
7306 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7307 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7308 return;
7309 }
7310
7311 fFsw &= ~X86_FSW_TOP_MASK;
7312 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7313 pFpuCtx->FSW = fFsw;
7314
7315 iemFpuRotateStackPush(pFpuCtx);
7316}
7317
7318
7319/**
7320 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7321 * FOP.
7322 *
7323 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7324 * @param pResult The result to store.
7325 * @param iStReg Which FPU register to store it in.
7326 */
7327IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7328{
7329 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7330 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7331 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7332 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7333}
7334
7335
7336/**
7337 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7338 * FOP, and then pops the stack.
7339 *
7340 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7341 * @param pResult The result to store.
7342 * @param iStReg Which FPU register to store it in.
7343 */
7344IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7345{
7346 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7347 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7348 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7349 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7350 iemFpuMaybePopOne(pFpuCtx);
7351}
7352
7353
7354/**
7355 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7356 * FPUDP, and FPUDS.
7357 *
7358 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7359 * @param pResult The result to store.
7360 * @param iStReg Which FPU register to store it in.
7361 * @param iEffSeg The effective memory operand selector register.
7362 * @param GCPtrEff The effective memory operand offset.
7363 */
7364IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7365 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7366{
7367 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7368 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7369 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7370 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7371 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7372}
7373
7374
7375/**
7376 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7377 * FPUDP, and FPUDS, and then pops the stack.
7378 *
7379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7380 * @param pResult The result to store.
7381 * @param iStReg Which FPU register to store it in.
7382 * @param iEffSeg The effective memory operand selector register.
7383 * @param GCPtrEff The effective memory operand offset.
7384 */
7385IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7386 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7387{
7388 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7389 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7390 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7391 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7392 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7393 iemFpuMaybePopOne(pFpuCtx);
7394}
7395
7396
7397/**
7398 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7399 *
7400 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7401 */
7402IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7403{
7404 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7405 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7406 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7407}
7408
7409
7410/**
7411 * Marks the specified stack register as free (for FFREE).
7412 *
7413 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7414 * @param iStReg The register to free.
7415 */
7416IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7417{
7418 Assert(iStReg < 8);
7419 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7420 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7421 pFpuCtx->FTW &= ~RT_BIT(iReg);
7422}
7423
7424
7425/**
7426 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7427 *
7428 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7429 */
7430IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7431{
7432 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7433 uint16_t uFsw = pFpuCtx->FSW;
7434 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7435 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7436 uFsw &= ~X86_FSW_TOP_MASK;
7437 uFsw |= uTop;
7438 pFpuCtx->FSW = uFsw;
7439}
7440
7441
7442/**
7443 * Decrements FSW.TOP, i.e. pushes a new item onto the stack without storing anything.
7444 *
7445 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7446 */
7447IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7448{
7449 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7450 uint16_t uFsw = pFpuCtx->FSW;
7451 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7452 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7453 uFsw &= ~X86_FSW_TOP_MASK;
7454 uFsw |= uTop;
7455 pFpuCtx->FSW = uFsw;
7456}
7457
7458
7459/**
7460 * Updates the FSW, FOP, FPUIP, and FPUCS.
7461 *
7462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7463 * @param u16FSW The FSW from the current instruction.
7464 */
7465IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7466{
7467 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7468 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7469 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7470 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7471}
7472
7473
7474/**
7475 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7476 *
7477 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7478 * @param u16FSW The FSW from the current instruction.
7479 */
7480IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7481{
7482 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7483 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7484 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7485 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7486 iemFpuMaybePopOne(pFpuCtx);
7487}
7488
7489
7490/**
7491 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7492 *
7493 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7494 * @param u16FSW The FSW from the current instruction.
7495 * @param iEffSeg The effective memory operand selector register.
7496 * @param GCPtrEff The effective memory operand offset.
7497 */
7498IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7499{
7500 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7501 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7502 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7503 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7504 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7505}
7506
7507
7508/**
7509 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7510 *
7511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7512 * @param u16FSW The FSW from the current instruction.
7513 */
7514IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7515{
7516 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7517 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7518 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7519 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7520 iemFpuMaybePopOne(pFpuCtx);
7521 iemFpuMaybePopOne(pFpuCtx);
7522}
7523
7524
7525/**
7526 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7527 *
7528 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7529 * @param u16FSW The FSW from the current instruction.
7530 * @param iEffSeg The effective memory operand selector register.
7531 * @param GCPtrEff The effective memory operand offset.
7532 */
7533IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7534{
7535 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7536 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7537 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7538 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7539 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7540 iemFpuMaybePopOne(pFpuCtx);
7541}
7542
7543
7544/**
7545 * Worker routine for raising an FPU stack underflow exception.
7546 *
7547 * @param pFpuCtx The FPU context.
7548 * @param iStReg The stack register being accessed.
7549 */
7550IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7551{
7552 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7553 if (pFpuCtx->FCW & X86_FCW_IM)
7554 {
7555 /* Masked underflow. */
7556 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7557 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7558 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7559 if (iStReg != UINT8_MAX)
7560 {
7561 pFpuCtx->FTW |= RT_BIT(iReg);
7562 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7563 }
7564 }
7565 else
7566 {
7567 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7568 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7569 }
7570}
7571
7572
7573/**
7574 * Raises a FPU stack underflow exception.
7575 *
7576 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7577 * @param iStReg The destination register that should be loaded
7578 * with QNaN if \#IS is not masked. Specify
7579 * UINT8_MAX if none (like for fcom).
7580 */
7581DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7582{
7583 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7584 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7585 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7586 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7587}
7588
7589
7590DECL_NO_INLINE(IEM_STATIC, void)
7591iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7592{
7593 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7594 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7595 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7596 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7597 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7598}
7599
7600
7601DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7602{
7603 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7604 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7605 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7606 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7607 iemFpuMaybePopOne(pFpuCtx);
7608}
7609
7610
7611DECL_NO_INLINE(IEM_STATIC, void)
7612iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7613{
7614 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7615 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7616 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7617 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7618 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7619 iemFpuMaybePopOne(pFpuCtx);
7620}
7621
7622
7623DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7624{
7625 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7626 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7627 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7628 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7629 iemFpuMaybePopOne(pFpuCtx);
7630 iemFpuMaybePopOne(pFpuCtx);
7631}
7632
7633
7634DECL_NO_INLINE(IEM_STATIC, void)
7635iemFpuStackPushUnderflow(PVMCPU pVCpu)
7636{
7637 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7638 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7639 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7640
7641 if (pFpuCtx->FCW & X86_FCW_IM)
7642 {
7643 /* Masked underflow - Push QNaN. */
7644 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7645 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7646 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7647 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7648 pFpuCtx->FTW |= RT_BIT(iNewTop);
7649 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7650 iemFpuRotateStackPush(pFpuCtx);
7651 }
7652 else
7653 {
7654 /* Exception pending - don't change TOP or the register stack. */
7655 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7656 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7657 }
7658}
7659
7660
7661DECL_NO_INLINE(IEM_STATIC, void)
7662iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7663{
7664 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7665 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7666 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7667
7668 if (pFpuCtx->FCW & X86_FCW_IM)
7669 {
7670 /* Masked underflow - Push QNaN. */
7671 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7672 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7673 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7674 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7675 pFpuCtx->FTW |= RT_BIT(iNewTop);
7676 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7677 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7678 iemFpuRotateStackPush(pFpuCtx);
7679 }
7680 else
7681 {
7682 /* Exception pending - don't change TOP or the register stack. */
7683 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7684 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7685 }
7686}
7687
7688
7689/**
7690 * Worker routine for raising an FPU stack overflow exception on a push.
7691 *
7692 * @param pFpuCtx The FPU context.
7693 */
7694IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7695{
7696 if (pFpuCtx->FCW & X86_FCW_IM)
7697 {
7698 /* Masked overflow. */
7699 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7700 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7701 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7702 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7703 pFpuCtx->FTW |= RT_BIT(iNewTop);
7704 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7705 iemFpuRotateStackPush(pFpuCtx);
7706 }
7707 else
7708 {
7709 /* Exception pending - don't change TOP or the register stack. */
7710 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7711 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7712 }
7713}
7714
7715
7716/**
7717 * Raises a FPU stack overflow exception on a push.
7718 *
7719 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7720 */
7721DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7722{
7723 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7724 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7725 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7726 iemFpuStackPushOverflowOnly(pFpuCtx);
7727}
7728
7729
7730/**
7731 * Raises a FPU stack overflow exception on a push with a memory operand.
7732 *
7733 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7734 * @param iEffSeg The effective memory operand selector register.
7735 * @param GCPtrEff The effective memory operand offset.
7736 */
7737DECL_NO_INLINE(IEM_STATIC, void)
7738iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7739{
7740 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7741 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7742 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7743 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7744 iemFpuStackPushOverflowOnly(pFpuCtx);
7745}
7746
7747
7748IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7749{
7750 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7751 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7752 if (pFpuCtx->FTW & RT_BIT(iReg))
7753 return VINF_SUCCESS;
7754 return VERR_NOT_FOUND;
7755}
7756
7757
7758IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7759{
7760 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7761 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7762 if (pFpuCtx->FTW & RT_BIT(iReg))
7763 {
7764 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7765 return VINF_SUCCESS;
7766 }
7767 return VERR_NOT_FOUND;
7768}
7769
7770
7771IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7772 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7773{
7774 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7775 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7776 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7777 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7778 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7779 {
7780 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7781 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7782 return VINF_SUCCESS;
7783 }
7784 return VERR_NOT_FOUND;
7785}
7786
7787
7788IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7789{
7790 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7791 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7792 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7793 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7794 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7795 {
7796 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7797 return VINF_SUCCESS;
7798 }
7799 return VERR_NOT_FOUND;
7800}
7801
7802
7803/**
7804 * Updates the FPU exception status after FCW is changed.
7805 *
7806 * @param pFpuCtx The FPU context.
7807 */
7808IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7809{
7810 uint16_t u16Fsw = pFpuCtx->FSW;
7811 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7812 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7813 else
7814 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7815 pFpuCtx->FSW = u16Fsw;
7816}
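/*
 * Sketch of the summary-bit rule applied above: FSW.ES (and the legacy B bit)
 * is set exactly when some exception flag raised in FSW is unmasked in FCW.
 * Plain C over the six maskable exception bits (IE..PE, bits 0..5); ES is
 * bit 7 and B is bit 15:
 *
 *     uint16_t fPending = (uFsw & 0x3f) & ~(uFcw & 0x3f);
 *     if (fPending)
 *         uFsw |= UINT16_C(0x8080);     // set ES and B
 *     else
 *         uFsw &= ~UINT16_C(0x8080);    // clear ES and B
 */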
7817
7818
7819/**
7820 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7821 *
7822 * @returns The full FTW.
7823 * @param pFpuCtx The FPU context.
7824 */
7825IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7826{
7827 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7828 uint16_t u16Ftw = 0;
7829 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7830 for (unsigned iSt = 0; iSt < 8; iSt++)
7831 {
7832 unsigned const iReg = (iSt + iTop) & 7;
7833 if (!(u8Ftw & RT_BIT(iReg)))
7834 u16Ftw |= 3 << (iReg * 2); /* empty */
7835 else
7836 {
7837 uint16_t uTag;
7838 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7839 if (pr80Reg->s.uExponent == 0x7fff)
7840 uTag = 2; /* Exponent is all 1's => Special. */
7841 else if (pr80Reg->s.uExponent == 0x0000)
7842 {
7843 if (pr80Reg->s.u64Mantissa == 0x0000)
7844 uTag = 1; /* All bits are zero => Zero. */
7845 else
7846 uTag = 2; /* Must be special. */
7847 }
7848 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7849 uTag = 0; /* Valid. */
7850 else
7851 uTag = 2; /* Must be special. */
7852
7853 u16Ftw |= uTag << (iReg * 2);
7854 }
7855 }
7856
7857 return u16Ftw;
7858}
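/*
 * The two-bit tags produced above follow the architectural encoding used by
 * FNSTENV/FNSAVE: 00 = valid, 01 = zero, 10 = special (NaN, infinity,
 * denormal, unnormal), 11 = empty.  A standalone classifier for a single
 * non-empty register, mirroring the logic above (illustrative name):
 *
 *     unsigned ExampleTagFor(uint16_t uExponent, uint64_t u64Mantissa)
 *     {
 *         if (uExponent == 0x7fff)                           // all ones
 *             return 2;                                      // special
 *         if (uExponent == 0)
 *             return u64Mantissa == 0 ? 1 : 2;               // zero : denormal
 *         return u64Mantissa & RT_BIT_64(63) ? 0 : 2;        // valid : unnormal
 *     }
 */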
7859
7860
7861/**
7862 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7863 *
7864 * @returns The compressed FTW.
7865 * @param u16FullFtw The full FTW to convert.
7866 */
7867IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7868{
7869 uint8_t u8Ftw = 0;
7870 for (unsigned i = 0; i < 8; i++)
7871 {
7872 if ((u16FullFtw & 3) != 3 /*empty*/)
7873 u8Ftw |= RT_BIT(i);
7874 u16FullFtw >>= 2;
7875 }
7876
7877 return u8Ftw;
7878}
7879
7880/** @} */
7881
7882
7883/** @name Memory access.
7884 *
7885 * @{
7886 */
7887
7888
7889/**
7890 * Updates the IEMCPU::cbWritten counter if applicable.
7891 *
7892 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7893 * @param fAccess The access being accounted for.
7894 * @param cbMem The access size.
7895 */
7896DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7897{
7898 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7899 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7900 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7901}
7902
7903
7904/**
7905 * Checks if the given segment can be written to, raise the appropriate
7906 * exception if not.
7907 *
7908 * @returns VBox strict status code.
7909 *
7910 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7911 * @param pHid Pointer to the hidden register.
7912 * @param iSegReg The register number.
7913 * @param pu64BaseAddr Where to return the base address to use for the
7914 * segment. (In 64-bit code it may differ from the
7915 * base in the hidden segment.)
7916 */
7917IEM_STATIC VBOXSTRICTRC
7918iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7919{
7920 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7921 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7922 else
7923 {
7924 if (!pHid->Attr.n.u1Present)
7925 {
7926 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7927 AssertRelease(uSel == 0);
7928 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7929 return iemRaiseGeneralProtectionFault0(pVCpu);
7930 }
7931
7932 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7933 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7934 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7935 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7936 *pu64BaseAddr = pHid->u64Base;
7937 }
7938 return VINF_SUCCESS;
7939}
7940
7941
7942/**
7943 * Checks if the given segment can be read from, raising the appropriate
7944 * exception if not.
7945 *
7946 * @returns VBox strict status code.
7947 *
7948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7949 * @param pHid Pointer to the hidden register.
7950 * @param iSegReg The register number.
7951 * @param pu64BaseAddr Where to return the base address to use for the
7952 * segment. (In 64-bit code it may differ from the
7953 * base in the hidden segment.)
7954 */
7955IEM_STATIC VBOXSTRICTRC
7956iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7957{
7958 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7959 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7960 else
7961 {
7962 if (!pHid->Attr.n.u1Present)
7963 {
7964 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7965 AssertRelease(uSel == 0);
7966 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7967 return iemRaiseGeneralProtectionFault0(pVCpu);
7968 }
7969
7970 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7971 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7972 *pu64BaseAddr = pHid->u64Base;
7973 }
7974 return VINF_SUCCESS;
7975}
7976
7977
7978/**
7979 * Applies the segment limit, base and attributes.
7980 *
7981 * This may raise a \#GP or \#SS.
7982 *
7983 * @returns VBox strict status code.
7984 *
7985 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7986 * @param fAccess The kind of access which is being performed.
7987 * @param iSegReg The index of the segment register to apply.
7988 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7989 * TSS, ++).
7990 * @param cbMem The access size.
7991 * @param pGCPtrMem Pointer to the guest memory address to apply
7992 * segmentation to. Input and output parameter.
7993 */
7994IEM_STATIC VBOXSTRICTRC
7995iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7996{
7997 if (iSegReg == UINT8_MAX)
7998 return VINF_SUCCESS;
7999
8000 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8001 switch (pVCpu->iem.s.enmCpuMode)
8002 {
8003 case IEMMODE_16BIT:
8004 case IEMMODE_32BIT:
8005 {
8006 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8007 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8008
8009 if ( pSel->Attr.n.u1Present
8010 && !pSel->Attr.n.u1Unusable)
8011 {
8012 Assert(pSel->Attr.n.u1DescType);
8013 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8014 {
8015 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8016 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8017 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8018
8019 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8020 {
8021 /** @todo CPL check. */
8022 }
8023
8024 /*
8025 * There are two kinds of data selectors, normal and expand down.
8026 */
8027 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8028 {
8029 if ( GCPtrFirst32 > pSel->u32Limit
8030 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8031 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8032 }
8033 else
8034 {
8035 /*
8036 * The upper boundary is defined by the B bit, not the G bit!
8037 */
8038 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8039 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8040 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8041 }
8042 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8043 }
8044 else
8045 {
8046
8047 /*
8048                 * A code selector can usually be used to read through; writing is
8049 * only permitted in real and V8086 mode.
8050 */
8051 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8052 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8053 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8054 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8055 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8056
8057 if ( GCPtrFirst32 > pSel->u32Limit
8058 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8059 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8060
8061 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8062 {
8063 /** @todo CPL check. */
8064 }
8065
8066 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8067 }
8068 }
8069 else
8070 return iemRaiseGeneralProtectionFault0(pVCpu);
8071 return VINF_SUCCESS;
8072 }
8073
8074 case IEMMODE_64BIT:
8075 {
8076 RTGCPTR GCPtrMem = *pGCPtrMem;
8077 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8078 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8079
8080 Assert(cbMem >= 1);
8081 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8082 return VINF_SUCCESS;
8083 return iemRaiseGeneralProtectionFault0(pVCpu);
8084 }
8085
8086 default:
8087 AssertFailedReturn(VERR_IEM_IPE_7);
8088 }
8089}
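
/*
 * A worked example of the legacy mode limit checks above (the selector values
 * are made up for illustration):
 *      - Expand-up data segment with u32Limit=0x0000ffff: a 4 byte access at
 *        offset 0xfffe gives GCPtrLast32=0x10001 > u32Limit and thus
 *        iemRaiseSelectorBounds().
 *      - Expand-down data segment with u32Limit=0x00000fff and B=0: valid
 *        offsets are 0x1000 thru 0xffff, so a byte access at 0x0800 raises
 *        while one at 0x2000 passes and gets u64Base added to it.
 */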
8090
8091
8092/**
8093 * Translates a virtual address to a physical address and checks if we
8094 * can access the page as specified.
8095 *
8096 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8097 * @param GCPtrMem The virtual address.
8098 * @param fAccess The intended access.
8099 * @param pGCPhysMem Where to return the physical address.
8100 */
8101IEM_STATIC VBOXSTRICTRC
8102iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8103{
8104 /** @todo Need a different PGM interface here. We're currently using
8105 * generic / REM interfaces. this won't cut it for R0 & RC. */
8106 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8107 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8108 RTGCPHYS GCPhys;
8109 uint64_t fFlags;
8110 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8111 if (RT_FAILURE(rc))
8112 {
8113 /** @todo Check unassigned memory in unpaged mode. */
8114 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8115 *pGCPhysMem = NIL_RTGCPHYS;
8116 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8117 }
8118
8119    /* If the page is writable, user accessible and does not have the no-exec
8120       bit set, all access is allowed.  Otherwise we'll have to check more carefully... */
8121 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8122 {
8123 /* Write to read only memory? */
8124 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8125 && !(fFlags & X86_PTE_RW)
8126 && ( (pVCpu->iem.s.uCpl == 3
8127 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8128 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
8129 {
8130 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8131 *pGCPhysMem = NIL_RTGCPHYS;
8132 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8133 }
8134
8135 /* Kernel memory accessed by userland? */
8136 if ( !(fFlags & X86_PTE_US)
8137 && pVCpu->iem.s.uCpl == 3
8138 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8139 {
8140 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8141 *pGCPhysMem = NIL_RTGCPHYS;
8142 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8143 }
8144
8145 /* Executing non-executable memory? */
8146 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8147 && (fFlags & X86_PTE_PAE_NX)
8148 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
8149 {
8150 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8151 *pGCPhysMem = NIL_RTGCPHYS;
8152 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8153 VERR_ACCESS_DENIED);
8154 }
8155 }
8156
8157 /*
8158 * Set the dirty / access flags.
8159     * ASSUMES this is set when the address is translated rather than on commit...
8160 */
8161 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8162 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8163 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8164 {
8165 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8166 AssertRC(rc2);
8167 }
8168
8169 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8170 *pGCPhysMem = GCPhys;
8171 return VINF_SUCCESS;
8172}
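
/*
 * Typical use of the translation helper above (an illustrative sketch only,
 * not compiled; GCPtrMem is assumed to be supplied by the caller):
 *
 *      RTGCPHYS     GCPhys;
 *      VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, IEM_ACCESS_DATA_R, &GCPhys);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;        // typically the #PF status raised above
 *      // GCPhys now holds the page address with the offset bits of GCPtrMem.
 */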
8173
8174
8175
8176/**
8177 * Maps a physical page.
8178 *
8179 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8181 * @param GCPhysMem The physical address.
8182 * @param fAccess The intended access.
8183 * @param ppvMem Where to return the mapping address.
8184 * @param pLock The PGM lock.
8185 */
8186IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8187{
8188#ifdef IEM_VERIFICATION_MODE_FULL
8189 /* Force the alternative path so we can ignore writes. */
8190 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
8191 {
8192 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8193 {
8194 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
8195 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8196 if (RT_FAILURE(rc2))
8197 pVCpu->iem.s.fProblematicMemory = true;
8198 }
8199 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8200 }
8201#endif
8202#ifdef IEM_LOG_MEMORY_WRITES
8203 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8204 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8205#endif
8206#ifdef IEM_VERIFICATION_MODE_MINIMAL
8207 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8208#endif
8209
8210 /** @todo This API may require some improving later. A private deal with PGM
8211     *        regarding locking and unlocking needs to be struck. A couple of TLBs
8212 * living in PGM, but with publicly accessible inlined access methods
8213 * could perhaps be an even better solution. */
8214 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8215 GCPhysMem,
8216 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8217 pVCpu->iem.s.fBypassHandlers,
8218 ppvMem,
8219 pLock);
8220 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8221 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8222
8223#ifdef IEM_VERIFICATION_MODE_FULL
8224 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8225 pVCpu->iem.s.fProblematicMemory = true;
8226#endif
8227 return rc;
8228}
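
/*
 * How the map/unmap pair above is typically used (an illustrative sketch only,
 * not compiled; GCPhysMem is an assumed input):
 *
 *      PGMPAGEMAPLOCK Lock;
 *      void          *pvMem;
 *      int rc = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, &pvMem, &Lock);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // ... access the page content thru pvMem ...
 *          iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, pvMem, &Lock);
 *      }
 *      else
 *      {
 *          // e.g. VERR_PGM_PHYS_TLB_CATCH_ALL: fall back on the bounce
 *          // buffer path (see iemMemBounceBufferMapPhys below).
 *      }
 */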
8229
8230
8231/**
8232 * Unmap a page previously mapped by iemMemPageMap.
8233 *
8234 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8235 * @param GCPhysMem The physical address.
8236 * @param fAccess The intended access.
8237 * @param pvMem What iemMemPageMap returned.
8238 * @param pLock The PGM lock.
8239 */
8240DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8241{
8242 NOREF(pVCpu);
8243 NOREF(GCPhysMem);
8244 NOREF(fAccess);
8245 NOREF(pvMem);
8246 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8247}
8248
8249
8250/**
8251 * Looks up a memory mapping entry.
8252 *
8253 * @returns The mapping index (non-negative) or VERR_NOT_FOUND (negative).
8254 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8255 * @param pvMem The memory address.
8256 * @param   fAccess             The access to match (only the what and type bits are compared).
8257 */
8258DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8259{
8260 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8261 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8262 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8263 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8264 return 0;
8265 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8266 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8267 return 1;
8268 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8269 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8270 return 2;
8271 return VERR_NOT_FOUND;
8272}
8273
8274
8275/**
8276 * Finds a free memmap entry when using iNextMapping doesn't work.
8277 *
8278 * @returns Memory mapping index, 1024 on failure.
8279 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8280 */
8281IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8282{
8283 /*
8284 * The easy case.
8285 */
8286 if (pVCpu->iem.s.cActiveMappings == 0)
8287 {
8288 pVCpu->iem.s.iNextMapping = 1;
8289 return 0;
8290 }
8291
8292 /* There should be enough mappings for all instructions. */
8293 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8294
8295 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8296 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8297 return i;
8298
8299 AssertFailedReturn(1024);
8300}
8301
8302
8303/**
8304 * Commits a bounce buffer that needs writing back and unmaps it.
8305 *
8306 * @returns Strict VBox status code.
8307 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8308 * @param iMemMap The index of the buffer to commit.
8309 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
8310 * Always false in ring-3, obviously.
8311 */
8312IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8313{
8314 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8315 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8316#ifdef IN_RING3
8317 Assert(!fPostponeFail);
8318 RT_NOREF_PV(fPostponeFail);
8319#endif
8320
8321 /*
8322 * Do the writing.
8323 */
8324#ifndef IEM_VERIFICATION_MODE_MINIMAL
8325 PVM pVM = pVCpu->CTX_SUFF(pVM);
8326 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
8327 && !IEM_VERIFICATION_ENABLED(pVCpu))
8328 {
8329 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8330 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8331 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8332 if (!pVCpu->iem.s.fBypassHandlers)
8333 {
8334 /*
8335 * Carefully and efficiently dealing with access handler return
8336             * codes makes this a little bloated.
8337 */
8338 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8339 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8340 pbBuf,
8341 cbFirst,
8342 PGMACCESSORIGIN_IEM);
8343 if (rcStrict == VINF_SUCCESS)
8344 {
8345 if (cbSecond)
8346 {
8347 rcStrict = PGMPhysWrite(pVM,
8348 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8349 pbBuf + cbFirst,
8350 cbSecond,
8351 PGMACCESSORIGIN_IEM);
8352 if (rcStrict == VINF_SUCCESS)
8353 { /* nothing */ }
8354 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8355 {
8356 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8357 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8358 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8359 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8360 }
8361# ifndef IN_RING3
8362 else if (fPostponeFail)
8363 {
8364 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8365 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8366 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8367 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8368 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8369 return iemSetPassUpStatus(pVCpu, rcStrict);
8370 }
8371# endif
8372 else
8373 {
8374 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8375 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8376 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8377 return rcStrict;
8378 }
8379 }
8380 }
8381 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8382 {
8383 if (!cbSecond)
8384 {
8385 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8386 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8387 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8388 }
8389 else
8390 {
8391 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8392 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8393 pbBuf + cbFirst,
8394 cbSecond,
8395 PGMACCESSORIGIN_IEM);
8396 if (rcStrict2 == VINF_SUCCESS)
8397 {
8398 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8399 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8400 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8401 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8402 }
8403 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8404 {
8405 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8406 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8407 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8408 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8409 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8410 }
8411# ifndef IN_RING3
8412 else if (fPostponeFail)
8413 {
8414 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8415 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8416 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8417 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8418 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8419 return iemSetPassUpStatus(pVCpu, rcStrict);
8420 }
8421# endif
8422 else
8423 {
8424 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8425 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8426 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8427 return rcStrict2;
8428 }
8429 }
8430 }
8431# ifndef IN_RING3
8432 else if (fPostponeFail)
8433 {
8434 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8435 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8436 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8437 if (!cbSecond)
8438 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8439 else
8440 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8441 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8442 return iemSetPassUpStatus(pVCpu, rcStrict);
8443 }
8444# endif
8445 else
8446 {
8447 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8448 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8449 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8450 return rcStrict;
8451 }
8452 }
8453 else
8454 {
8455 /*
8456 * No access handlers, much simpler.
8457 */
8458 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8459 if (RT_SUCCESS(rc))
8460 {
8461 if (cbSecond)
8462 {
8463 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8464 if (RT_SUCCESS(rc))
8465 { /* likely */ }
8466 else
8467 {
8468 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8469 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8470 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8471 return rc;
8472 }
8473 }
8474 }
8475 else
8476 {
8477 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8478 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8479 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8480 return rc;
8481 }
8482 }
8483 }
8484#endif
8485
8486#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8487 /*
8488 * Record the write(s).
8489 */
8490 if (!pVCpu->iem.s.fNoRem)
8491 {
8492 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8493 if (pEvtRec)
8494 {
8495 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8496 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
8497 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8498 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
8499 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
8500 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8501 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8502 }
8503 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8504 {
8505 pEvtRec = iemVerifyAllocRecord(pVCpu);
8506 if (pEvtRec)
8507 {
8508 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8509 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
8510 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8511 memcpy(pEvtRec->u.RamWrite.ab,
8512 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
8513 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
8514 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8515 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8516 }
8517 }
8518 }
8519#endif
8520#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
8521 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8522 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8523 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8524 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8525 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8526 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8527
8528 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8529 g_cbIemWrote = cbWrote;
8530 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8531#endif
8532
8533 /*
8534 * Free the mapping entry.
8535 */
8536 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8537 Assert(pVCpu->iem.s.cActiveMappings != 0);
8538 pVCpu->iem.s.cActiveMappings--;
8539 return VINF_SUCCESS;
8540}
8541
8542
8543/**
8544 * iemMemMap worker that deals with a request crossing pages.
8545 */
8546IEM_STATIC VBOXSTRICTRC
8547iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8548{
8549 /*
8550 * Do the address translations.
8551 */
8552 RTGCPHYS GCPhysFirst;
8553 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8554 if (rcStrict != VINF_SUCCESS)
8555 return rcStrict;
8556
8557 RTGCPHYS GCPhysSecond;
8558 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8559 fAccess, &GCPhysSecond);
8560 if (rcStrict != VINF_SUCCESS)
8561 return rcStrict;
8562 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8563
8564 PVM pVM = pVCpu->CTX_SUFF(pVM);
8565#ifdef IEM_VERIFICATION_MODE_FULL
8566 /*
8567 * Detect problematic memory when verifying so we can select
8568 * the right execution engine. (TLB: Redo this.)
8569 */
8570 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8571 {
8572 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8573 if (RT_SUCCESS(rc2))
8574 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8575 if (RT_FAILURE(rc2))
8576 pVCpu->iem.s.fProblematicMemory = true;
8577 }
8578#endif
8579
8580
8581 /*
8582 * Read in the current memory content if it's a read, execute or partial
8583 * write access.
8584 */
8585 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8586 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8587 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8588
8589 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8590 {
8591 if (!pVCpu->iem.s.fBypassHandlers)
8592 {
8593 /*
8594             * Must carefully deal with access handler status codes here, which
8595 * makes the code a bit bloated.
8596 */
8597 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8598 if (rcStrict == VINF_SUCCESS)
8599 {
8600 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8601 if (rcStrict == VINF_SUCCESS)
8602 { /*likely */ }
8603 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8604 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8605 else
8606 {
8607 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8608 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8609 return rcStrict;
8610 }
8611 }
8612 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8613 {
8614 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8615 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8616 {
8617 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8618 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8619 }
8620 else
8621 {
8622 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8623                          GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8624 return rcStrict2;
8625 }
8626 }
8627 else
8628 {
8629 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8630 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8631 return rcStrict;
8632 }
8633 }
8634 else
8635 {
8636 /*
8637             * No informational status codes here, much more straightforward.
8638 */
8639 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8640 if (RT_SUCCESS(rc))
8641 {
8642 Assert(rc == VINF_SUCCESS);
8643 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8644 if (RT_SUCCESS(rc))
8645 Assert(rc == VINF_SUCCESS);
8646 else
8647 {
8648 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8649 return rc;
8650 }
8651 }
8652 else
8653 {
8654 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8655 return rc;
8656 }
8657 }
8658
8659#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8660 if ( !pVCpu->iem.s.fNoRem
8661 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8662 {
8663 /*
8664 * Record the reads.
8665 */
8666 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8667 if (pEvtRec)
8668 {
8669 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8670 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8671 pEvtRec->u.RamRead.cb = cbFirstPage;
8672 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8673 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8674 }
8675 pEvtRec = iemVerifyAllocRecord(pVCpu);
8676 if (pEvtRec)
8677 {
8678 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8679 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8680 pEvtRec->u.RamRead.cb = cbSecondPage;
8681 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8682 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8683 }
8684 }
8685#endif
8686 }
8687#ifdef VBOX_STRICT
8688 else
8689 memset(pbBuf, 0xcc, cbMem);
8690 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8691 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8692#endif
8693
8694 /*
8695 * Commit the bounce buffer entry.
8696 */
8697 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8698 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8699 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8700 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8701 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8702 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8703 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8704 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8705 pVCpu->iem.s.cActiveMappings++;
8706
8707 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8708 *ppvMem = pbBuf;
8709 return VINF_SUCCESS;
8710}
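
/*
 * Illustration of the page split arithmetic above (assuming 4 KB pages): for
 * a GCPtrFirst with page offset 0xffe and cbMem=4, the bounce buffer gets
 *      cbFirstPage  = PAGE_SIZE - 0xffe = 2 bytes from GCPhysFirst, and
 *      cbSecondPage = cbMem - cbFirstPage = 2 bytes from GCPhysSecond,
 * stored back to back in aBounceBuffers[iMemMap].ab[].
 */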
8711
8712
8713/**
8714 * iemMemMap worker that deals with iemMemPageMap failures.
8715 */
8716IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8717 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8718{
8719 /*
8720 * Filter out conditions we can handle and the ones which shouldn't happen.
8721 */
8722 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8723 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8724 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8725 {
8726 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8727 return rcMap;
8728 }
8729 pVCpu->iem.s.cPotentialExits++;
8730
8731 /*
8732 * Read in the current memory content if it's a read, execute or partial
8733 * write access.
8734 */
8735 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8736 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8737 {
8738 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8739 memset(pbBuf, 0xff, cbMem);
8740 else
8741 {
8742 int rc;
8743 if (!pVCpu->iem.s.fBypassHandlers)
8744 {
8745 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8746 if (rcStrict == VINF_SUCCESS)
8747 { /* nothing */ }
8748 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8749 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8750 else
8751 {
8752 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8753 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8754 return rcStrict;
8755 }
8756 }
8757 else
8758 {
8759 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8760 if (RT_SUCCESS(rc))
8761 { /* likely */ }
8762 else
8763 {
8764 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8765 GCPhysFirst, rc));
8766 return rc;
8767 }
8768 }
8769 }
8770
8771#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8772 if ( !pVCpu->iem.s.fNoRem
8773 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8774 {
8775 /*
8776 * Record the read.
8777 */
8778 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8779 if (pEvtRec)
8780 {
8781 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8782 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8783 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8784 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8785 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8786 }
8787 }
8788#endif
8789 }
8790#ifdef VBOX_STRICT
8791 else
8792 memset(pbBuf, 0xcc, cbMem);
8793#endif
8794#ifdef VBOX_STRICT
8795 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8796 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8797#endif
8798
8799 /*
8800 * Commit the bounce buffer entry.
8801 */
8802 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8803 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8804 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8805 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8806 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8807 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8808 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8809 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8810 pVCpu->iem.s.cActiveMappings++;
8811
8812 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8813 *ppvMem = pbBuf;
8814 return VINF_SUCCESS;
8815}
8816
8817
8818
8819/**
8820 * Maps the specified guest memory for the given kind of access.
8821 *
8822 * This may be using bounce buffering of the memory if it's crossing a page
8823 * boundary or if there is an access handler installed for any of it. Because
8824 * of lock prefix guarantees, we're in for some extra clutter when this
8825 * happens.
8826 *
8827 * This may raise a \#GP, \#SS, \#PF or \#AC.
8828 *
8829 * @returns VBox strict status code.
8830 *
8831 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8832 * @param ppvMem Where to return the pointer to the mapped
8833 * memory.
8834 * @param cbMem The number of bytes to map. This is usually 1,
8835 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8836 * string operations it can be up to a page.
8837 * @param iSegReg The index of the segment register to use for
8838 * this access. The base and limits are checked.
8839 * Use UINT8_MAX to indicate that no segmentation
8840 * is required (for IDT, GDT and LDT accesses).
8841 * @param GCPtrMem The address of the guest memory.
8842 * @param fAccess How the memory is being accessed. The
8843 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8844 * how to map the memory, while the
8845 * IEM_ACCESS_WHAT_XXX bit is used when raising
8846 * exceptions.
8847 */
8848IEM_STATIC VBOXSTRICTRC
8849iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8850{
8851 /*
8852 * Check the input and figure out which mapping entry to use.
8853 */
8854 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8855 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8856 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8857
8858 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8859 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8860 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8861 {
8862 iMemMap = iemMemMapFindFree(pVCpu);
8863 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8864 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8865 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8866 pVCpu->iem.s.aMemMappings[2].fAccess),
8867 VERR_IEM_IPE_9);
8868 }
8869
8870 /*
8871 * Map the memory, checking that we can actually access it. If something
8872 * slightly complicated happens, fall back on bounce buffering.
8873 */
8874 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8875 if (rcStrict != VINF_SUCCESS)
8876 return rcStrict;
8877
8878 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8879 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8880
8881 RTGCPHYS GCPhysFirst;
8882 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8883 if (rcStrict != VINF_SUCCESS)
8884 return rcStrict;
8885
8886 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8887 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8888 if (fAccess & IEM_ACCESS_TYPE_READ)
8889 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8890
8891 void *pvMem;
8892 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8893 if (rcStrict != VINF_SUCCESS)
8894 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8895
8896 /*
8897 * Fill in the mapping table entry.
8898 */
8899 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8900 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8901 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8902 pVCpu->iem.s.cActiveMappings++;
8903
8904 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8905 *ppvMem = pvMem;
8906 return VINF_SUCCESS;
8907}
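
/*
 * A typical calling pattern for iemMemMap (an illustrative sketch only, not
 * compiled; iSegReg, GCPtrMem and u32Value are assumed inputs):
 *
 *      uint32_t    *pu32Dst;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu32Dst = u32Value;
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
 *      }
 *      return rcStrict;
 *
 * The commit step is what actually writes a bounce buffered mapping back to
 * guest memory; for directly mapped pages it just releases the PGM lock.
 */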
8908
8909
8910/**
8911 * Commits the guest memory if bounce buffered and unmaps it.
8912 *
8913 * @returns Strict VBox status code.
8914 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8915 * @param pvMem The mapping.
8916 * @param fAccess The kind of access.
8917 */
8918IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8919{
8920 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8921 AssertReturn(iMemMap >= 0, iMemMap);
8922
8923 /* If it's bounce buffered, we may need to write back the buffer. */
8924 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8925 {
8926 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8927 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8928 }
8929 /* Otherwise unlock it. */
8930 else
8931 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8932
8933 /* Free the entry. */
8934 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8935 Assert(pVCpu->iem.s.cActiveMappings != 0);
8936 pVCpu->iem.s.cActiveMappings--;
8937 return VINF_SUCCESS;
8938}
8939
8940#ifdef IEM_WITH_SETJMP
8941
8942/**
8943 * Maps the specified guest memory for the given kind of access, longjmp on
8944 * error.
8945 *
8946 * This may be using bounce buffering of the memory if it's crossing a page
8947 * boundary or if there is an access handler installed for any of it. Because
8948 * of lock prefix guarantees, we're in for some extra clutter when this
8949 * happens.
8950 *
8951 * This may raise a \#GP, \#SS, \#PF or \#AC.
8952 *
8953 * @returns Pointer to the mapped memory.
8954 *
8955 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8956 * @param cbMem The number of bytes to map. This is usually 1,
8957 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8958 * string operations it can be up to a page.
8959 * @param iSegReg The index of the segment register to use for
8960 * this access. The base and limits are checked.
8961 * Use UINT8_MAX to indicate that no segmentation
8962 * is required (for IDT, GDT and LDT accesses).
8963 * @param GCPtrMem The address of the guest memory.
8964 * @param fAccess How the memory is being accessed. The
8965 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8966 * how to map the memory, while the
8967 * IEM_ACCESS_WHAT_XXX bit is used when raising
8968 * exceptions.
8969 */
8970IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8971{
8972 /*
8973 * Check the input and figure out which mapping entry to use.
8974 */
8975 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8976 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8977 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8978
8979 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8980 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8981 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8982 {
8983 iMemMap = iemMemMapFindFree(pVCpu);
8984 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8985 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8986 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8987 pVCpu->iem.s.aMemMappings[2].fAccess),
8988 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8989 }
8990
8991 /*
8992 * Map the memory, checking that we can actually access it. If something
8993 * slightly complicated happens, fall back on bounce buffering.
8994 */
8995 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8996 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8997 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8998
8999 /* Crossing a page boundary? */
9000 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
9001 { /* No (likely). */ }
9002 else
9003 {
9004 void *pvMem;
9005 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
9006 if (rcStrict == VINF_SUCCESS)
9007 return pvMem;
9008 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9009 }
9010
9011 RTGCPHYS GCPhysFirst;
9012 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
9013 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9014 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9015
9016 if (fAccess & IEM_ACCESS_TYPE_WRITE)
9017 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9018 if (fAccess & IEM_ACCESS_TYPE_READ)
9019 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9020
9021 void *pvMem;
9022 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9023 if (rcStrict == VINF_SUCCESS)
9024 { /* likely */ }
9025 else
9026 {
9027 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9028 if (rcStrict == VINF_SUCCESS)
9029 return pvMem;
9030 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9031 }
9032
9033 /*
9034 * Fill in the mapping table entry.
9035 */
9036 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9037 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9038 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9039 pVCpu->iem.s.cActiveMappings++;
9040
9041 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9042 return pvMem;
9043}
9044
9045
9046/**
9047 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9048 *
9049 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9050 * @param pvMem The mapping.
9051 * @param fAccess The kind of access.
9052 */
9053IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9054{
9055 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9056 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9057
9058 /* If it's bounce buffered, we may need to write back the buffer. */
9059 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9060 {
9061 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9062 {
9063 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9064 if (rcStrict == VINF_SUCCESS)
9065 return;
9066 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9067 }
9068 }
9069 /* Otherwise unlock it. */
9070 else
9071 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9072
9073 /* Free the entry. */
9074 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9075 Assert(pVCpu->iem.s.cActiveMappings != 0);
9076 pVCpu->iem.s.cActiveMappings--;
9077}
9078
9079#endif
9080
9081#ifndef IN_RING3
9082/**
9083 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9084 * buffer part shows trouble the write will be postponed to ring-3 (sets FF and stuff).
9085 *
9086 * Allows the instruction to be completed and retired, while the IEM user will
9087 * return to ring-3 immediately afterwards and do the postponed writes there.
9088 *
9089 * @returns VBox status code (no strict statuses). Caller must check
9090 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9091 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9092 * @param pvMem The mapping.
9093 * @param fAccess The kind of access.
9094 */
9095IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9096{
9097 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9098 AssertReturn(iMemMap >= 0, iMemMap);
9099
9100 /* If it's bounce buffered, we may need to write back the buffer. */
9101 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9102 {
9103 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9104 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9105 }
9106 /* Otherwise unlock it. */
9107 else
9108 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9109
9110 /* Free the entry. */
9111 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9112 Assert(pVCpu->iem.s.cActiveMappings != 0);
9113 pVCpu->iem.s.cActiveMappings--;
9114 return VINF_SUCCESS;
9115}
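
/*
 * Note for the postponed case above: iemMemBounceBufferCommitAndUnmap sets
 * VMCPU_FF_IEM when it defers the write, and the caller is expected to return
 * to ring-3 right after the instruction retires so the buffered bytes can be
 * written out there; that is why VMCPU_FF_IEM must be checked before repeating
 * string instructions.
 */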
9116#endif
9117
9118
9119/**
9120 * Rolls back mappings, releasing page locks and such.
9121 *
9122 * The caller shall only call this after checking cActiveMappings.
9123 *
9125 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9126 */
9127IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9128{
9129 Assert(pVCpu->iem.s.cActiveMappings > 0);
9130
9131 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9132 while (iMemMap-- > 0)
9133 {
9134 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9135 if (fAccess != IEM_ACCESS_INVALID)
9136 {
9137 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9138 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9139 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9140 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9141 Assert(pVCpu->iem.s.cActiveMappings > 0);
9142 pVCpu->iem.s.cActiveMappings--;
9143 }
9144 }
9145}
9146
9147
9148/**
9149 * Fetches a data byte.
9150 *
9151 * @returns Strict VBox status code.
9152 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9153 * @param pu8Dst Where to return the byte.
9154 * @param iSegReg The index of the segment register to use for
9155 * this access. The base and limits are checked.
9156 * @param GCPtrMem The address of the guest memory.
9157 */
9158IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9159{
9160 /* The lazy approach for now... */
9161 uint8_t const *pu8Src;
9162 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9163 if (rc == VINF_SUCCESS)
9164 {
9165 *pu8Dst = *pu8Src;
9166 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9167 }
9168 return rc;
9169}
9170
9171
9172#ifdef IEM_WITH_SETJMP
9173/**
9174 * Fetches a data byte, longjmp on error.
9175 *
9176 * @returns The byte.
9177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9178 * @param iSegReg The index of the segment register to use for
9179 * this access. The base and limits are checked.
9180 * @param GCPtrMem The address of the guest memory.
9181 */
9182DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9183{
9184 /* The lazy approach for now... */
9185 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9186 uint8_t const bRet = *pu8Src;
9187 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9188 return bRet;
9189}
9190#endif /* IEM_WITH_SETJMP */
9191
9192
9193/**
9194 * Fetches a data word.
9195 *
9196 * @returns Strict VBox status code.
9197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9198 * @param pu16Dst Where to return the word.
9199 * @param iSegReg The index of the segment register to use for
9200 * this access. The base and limits are checked.
9201 * @param GCPtrMem The address of the guest memory.
9202 */
9203IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9204{
9205 /* The lazy approach for now... */
9206 uint16_t const *pu16Src;
9207 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9208 if (rc == VINF_SUCCESS)
9209 {
9210 *pu16Dst = *pu16Src;
9211 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9212 }
9213 return rc;
9214}
9215
9216
9217#ifdef IEM_WITH_SETJMP
9218/**
9219 * Fetches a data word, longjmp on error.
9220 *
9221 * @returns The word
9222 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9223 * @param iSegReg The index of the segment register to use for
9224 * this access. The base and limits are checked.
9225 * @param GCPtrMem The address of the guest memory.
9226 */
9227DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9228{
9229 /* The lazy approach for now... */
9230 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9231 uint16_t const u16Ret = *pu16Src;
9232 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9233 return u16Ret;
9234}
9235#endif
9236
9237
9238/**
9239 * Fetches a data dword.
9240 *
9241 * @returns Strict VBox status code.
9242 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9243 * @param pu32Dst Where to return the dword.
9244 * @param iSegReg The index of the segment register to use for
9245 * this access. The base and limits are checked.
9246 * @param GCPtrMem The address of the guest memory.
9247 */
9248IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9249{
9250 /* The lazy approach for now... */
9251 uint32_t const *pu32Src;
9252 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9253 if (rc == VINF_SUCCESS)
9254 {
9255 *pu32Dst = *pu32Src;
9256 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9257 }
9258 return rc;
9259}
9260
9261
9262#ifdef IEM_WITH_SETJMP
9263
9264IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9265{
9266 Assert(cbMem >= 1);
9267 Assert(iSegReg < X86_SREG_COUNT);
9268
9269 /*
9270 * 64-bit mode is simpler.
9271 */
9272 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9273 {
9274 if (iSegReg >= X86_SREG_FS)
9275 {
9276 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9277 GCPtrMem += pSel->u64Base;
9278 }
9279
9280 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9281 return GCPtrMem;
9282 }
9283 /*
9284 * 16-bit and 32-bit segmentation.
9285 */
9286 else
9287 {
9288 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9289 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9290 == X86DESCATTR_P /* data, expand up */
9291 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9292 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9293 {
9294 /* expand up */
9295            uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
9296            if (RT_LIKELY(   GCPtrLast32 <= pSel->u32Limit
9297                          && GCPtrLast32 >= (uint32_t)GCPtrMem))
9298 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9299 }
9300 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9301 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9302 {
9303 /* expand down */
9304 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9305 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9306 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9307 && GCPtrLast32 > (uint32_t)GCPtrMem))
9308 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9309 }
9310 else
9311 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9312 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9313 }
9314 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9315}
9316
9317
9318IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9319{
9320 Assert(cbMem >= 1);
9321 Assert(iSegReg < X86_SREG_COUNT);
9322
9323 /*
9324 * 64-bit mode is simpler.
9325 */
9326 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9327 {
9328 if (iSegReg >= X86_SREG_FS)
9329 {
9330 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9331 GCPtrMem += pSel->u64Base;
9332 }
9333
9334 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9335 return GCPtrMem;
9336 }
9337 /*
9338 * 16-bit and 32-bit segmentation.
9339 */
9340 else
9341 {
9342 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9343 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9344 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9345 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9346 {
9347 /* expand up */
9348            uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
9349            if (RT_LIKELY(   GCPtrLast32 <= pSel->u32Limit
9350                          && GCPtrLast32 >= (uint32_t)GCPtrMem))
9351 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9352 }
9353        else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9354 {
9355 /* expand down */
9356 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9357 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9358 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9359 && GCPtrLast32 > (uint32_t)GCPtrMem))
9360 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9361 }
9362 else
9363 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9364 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9365 }
9366 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9367}
9368
9369
9370/**
9371 * Fetches a data dword, longjmp on error, fallback/safe version.
9372 *
9373 * @returns The dword
9374 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9375 * @param iSegReg The index of the segment register to use for
9376 * this access. The base and limits are checked.
9377 * @param GCPtrMem The address of the guest memory.
9378 */
9379IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9380{
9381 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9382 uint32_t const u32Ret = *pu32Src;
9383 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9384 return u32Ret;
9385}
9386
9387
9388/**
9389 * Fetches a data dword, longjmp on error.
9390 *
9391 * @returns The dword
9392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9393 * @param iSegReg The index of the segment register to use for
9394 * this access. The base and limits are checked.
9395 * @param GCPtrMem The address of the guest memory.
9396 */
9397DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9398{
9399# ifdef IEM_WITH_DATA_TLB
9400 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9401 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9402 {
9403 /// @todo more later.
9404 }
9405
9406 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9407# else
9408 /* The lazy approach. */
9409 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9410 uint32_t const u32Ret = *pu32Src;
9411 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9412 return u32Ret;
9413# endif
9414}
9415#endif
9416
9417
9418#ifdef SOME_UNUSED_FUNCTION
9419/**
9420 * Fetches a data dword and sign extends it to a qword.
9421 *
9422 * @returns Strict VBox status code.
9423 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9424 * @param pu64Dst Where to return the sign extended value.
9425 * @param iSegReg The index of the segment register to use for
9426 * this access. The base and limits are checked.
9427 * @param GCPtrMem The address of the guest memory.
9428 */
9429IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9430{
9431 /* The lazy approach for now... */
9432 int32_t const *pi32Src;
9433 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9434 if (rc == VINF_SUCCESS)
9435 {
9436 *pu64Dst = *pi32Src;
9437 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9438 }
9439#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9440 else
9441 *pu64Dst = 0;
9442#endif
9443 return rc;
9444}
9445#endif
9446
9447
9448/**
9449 * Fetches a data qword.
9450 *
9451 * @returns Strict VBox status code.
9452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9453 * @param pu64Dst Where to return the qword.
9454 * @param iSegReg The index of the segment register to use for
9455 * this access. The base and limits are checked.
9456 * @param GCPtrMem The address of the guest memory.
9457 */
9458IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9459{
9460 /* The lazy approach for now... */
9461 uint64_t const *pu64Src;
9462 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9463 if (rc == VINF_SUCCESS)
9464 {
9465 *pu64Dst = *pu64Src;
9466 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9467 }
9468 return rc;
9469}
9470
9471
9472#ifdef IEM_WITH_SETJMP
9473/**
9474 * Fetches a data qword, longjmp on error.
9475 *
9476 * @returns The qword.
9477 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9478 * @param iSegReg The index of the segment register to use for
9479 * this access. The base and limits are checked.
9480 * @param GCPtrMem The address of the guest memory.
9481 */
9482DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9483{
9484 /* The lazy approach for now... */
9485 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9486 uint64_t const u64Ret = *pu64Src;
9487 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9488 return u64Ret;
9489}
9490#endif
9491
9492
9493/**
9494 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9495 *
9496 * @returns Strict VBox status code.
9497 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9498 * @param pu64Dst Where to return the qword.
9499 * @param iSegReg The index of the segment register to use for
9500 * this access. The base and limits are checked.
9501 * @param GCPtrMem The address of the guest memory.
9502 */
9503IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9504{
9505 /* The lazy approach for now... */
9506 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9507 if (RT_UNLIKELY(GCPtrMem & 15))
9508 return iemRaiseGeneralProtectionFault0(pVCpu);
9509
9510 uint64_t const *pu64Src;
9511 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9512 if (rc == VINF_SUCCESS)
9513 {
9514 *pu64Dst = *pu64Src;
9515 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9516 }
9517 return rc;
9518}
9519
9520
9521#ifdef IEM_WITH_SETJMP
9522/**
9523 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9524 *
9525 * @returns The qword.
9526 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9527 * @param iSegReg The index of the segment register to use for
9528 * this access. The base and limits are checked.
9529 * @param GCPtrMem The address of the guest memory.
9530 */
9531DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9532{
9533 /* The lazy approach for now... */
9534 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9535 if (RT_LIKELY(!(GCPtrMem & 15)))
9536 {
9537 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9538 uint64_t const u64Ret = *pu64Src;
9539 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9540 return u64Ret;
9541 }
9542
9543 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9544 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9545}
9546#endif
9547
9548
9549/**
9550 * Fetches a data tword.
9551 *
9552 * @returns Strict VBox status code.
9553 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9554 * @param pr80Dst Where to return the tword.
9555 * @param iSegReg The index of the segment register to use for
9556 * this access. The base and limits are checked.
9557 * @param GCPtrMem The address of the guest memory.
9558 */
9559IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9560{
9561 /* The lazy approach for now... */
9562 PCRTFLOAT80U pr80Src;
9563 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9564 if (rc == VINF_SUCCESS)
9565 {
9566 *pr80Dst = *pr80Src;
9567 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9568 }
9569 return rc;
9570}
9571
9572
9573#ifdef IEM_WITH_SETJMP
9574/**
9575 * Fetches a data tword, longjmp on error.
9576 *
9577 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9578 * @param pr80Dst Where to return the tword.
9579 * @param iSegReg The index of the segment register to use for
9580 * this access. The base and limits are checked.
9581 * @param GCPtrMem The address of the guest memory.
9582 */
9583DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9584{
9585 /* The lazy approach for now... */
9586 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9587 *pr80Dst = *pr80Src;
9588 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9589}
9590#endif
9591
9592
9593/**
9594 * Fetches a data dqword (double qword), generally SSE related.
9595 *
9596 * @returns Strict VBox status code.
9597 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9598 * @param   pu128Dst            Where to return the dqword.
9599 * @param iSegReg The index of the segment register to use for
9600 * this access. The base and limits are checked.
9601 * @param GCPtrMem The address of the guest memory.
9602 */
9603IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9604{
9605 /* The lazy approach for now... */
9606 PCRTUINT128U pu128Src;
9607 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9608 if (rc == VINF_SUCCESS)
9609 {
9610 pu128Dst->au64[0] = pu128Src->au64[0];
9611 pu128Dst->au64[1] = pu128Src->au64[1];
9612 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9613 }
9614 return rc;
9615}
9616
9617
9618#ifdef IEM_WITH_SETJMP
9619/**
9620 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9621 *
9622 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9623 * @param   pu128Dst            Where to return the dqword.
9624 * @param iSegReg The index of the segment register to use for
9625 * this access. The base and limits are checked.
9626 * @param GCPtrMem The address of the guest memory.
9627 */
9628IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9629{
9630 /* The lazy approach for now... */
9631 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9632 pu128Dst->au64[0] = pu128Src->au64[0];
9633 pu128Dst->au64[1] = pu128Src->au64[1];
9634 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9635}
9636#endif
9637
9638
9639/**
9640 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9641 * related.
9642 *
9643 * Raises \#GP(0) if not aligned.
9644 *
9645 * @returns Strict VBox status code.
9646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9647 * @param   pu128Dst            Where to return the dqword.
9648 * @param iSegReg The index of the segment register to use for
9649 * this access. The base and limits are checked.
9650 * @param GCPtrMem The address of the guest memory.
9651 */
9652IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9653{
9654 /* The lazy approach for now... */
9655 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9656 if ( (GCPtrMem & 15)
9657 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9658 return iemRaiseGeneralProtectionFault0(pVCpu);
9659
9660 PCRTUINT128U pu128Src;
9661 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9662 if (rc == VINF_SUCCESS)
9663 {
9664 pu128Dst->au64[0] = pu128Src->au64[0];
9665 pu128Dst->au64[1] = pu128Src->au64[1];
9666 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9667 }
9668 return rc;
9669}
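/*
 * Minimal sketch of the alignment predicate used by the aligned SSE paths above
 * and below (assumption: the helper name is invented for illustration): the
 * access is let through either when it is 16-byte aligned or when MXCSR.MM
 * (misaligned SSE mode) is set, otherwise #GP(0) is raised.
 */
#if 0 /* example only, never compiled */
DECLINLINE(bool) iemExampleSseAlignmentOk(RTGCPTR GCPtrMem, uint32_t fMxcsr)
{
    return (GCPtrMem & 15) == 0     /* naturally 16-byte aligned, or ... */
        || (fMxcsr & X86_MXCSR_MM); /* ... misaligned accesses explicitly permitted */
}
#endif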
9670
9671
9672#ifdef IEM_WITH_SETJMP
9673/**
9674 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9675 * related, longjmp on error.
9676 *
9677 * Raises \#GP(0) if not aligned.
9678 *
9679 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9680 * @param   pu128Dst            Where to return the dqword.
9681 * @param iSegReg The index of the segment register to use for
9682 * this access. The base and limits are checked.
9683 * @param GCPtrMem The address of the guest memory.
9684 */
9685DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9686{
9687 /* The lazy approach for now... */
9688 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9689 if ( (GCPtrMem & 15) == 0
9690 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9691 {
9692 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9693 pu128Dst->au64[0] = pu128Src->au64[0];
9694 pu128Dst->au64[1] = pu128Src->au64[1];
9695 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9696 return;
9697 }
9698
9699 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9700 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9701}
9702#endif
9703
9704
9705/**
9706 * Fetches a data oword (octo word), generally AVX related.
9707 *
9708 * @returns Strict VBox status code.
9709 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9710 * @param   pu256Dst            Where to return the oword.
9711 * @param iSegReg The index of the segment register to use for
9712 * this access. The base and limits are checked.
9713 * @param GCPtrMem The address of the guest memory.
9714 */
9715IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9716{
9717 /* The lazy approach for now... */
9718 PCRTUINT256U pu256Src;
9719 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9720 if (rc == VINF_SUCCESS)
9721 {
9722 pu256Dst->au64[0] = pu256Src->au64[0];
9723 pu256Dst->au64[1] = pu256Src->au64[1];
9724 pu256Dst->au64[2] = pu256Src->au64[2];
9725 pu256Dst->au64[3] = pu256Src->au64[3];
9726 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9727 }
9728 return rc;
9729}
9730
9731
9732#ifdef IEM_WITH_SETJMP
9733/**
9734 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9735 *
9736 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9737 * @param   pu256Dst            Where to return the oword.
9738 * @param iSegReg The index of the segment register to use for
9739 * this access. The base and limits are checked.
9740 * @param GCPtrMem The address of the guest memory.
9741 */
9742IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9743{
9744 /* The lazy approach for now... */
9745 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9746 pu256Dst->au64[0] = pu256Src->au64[0];
9747 pu256Dst->au64[1] = pu256Src->au64[1];
9748 pu256Dst->au64[2] = pu256Src->au64[2];
9749 pu256Dst->au64[3] = pu256Src->au64[3];
9750 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9751}
9752#endif
9753
9754
9755/**
9756 * Fetches a data oword (octo word) at an aligned address, generally AVX
9757 * related.
9758 *
9759 * Raises \#GP(0) if not aligned.
9760 *
9761 * @returns Strict VBox status code.
9762 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9763 * @param   pu256Dst            Where to return the oword.
9764 * @param iSegReg The index of the segment register to use for
9765 * this access. The base and limits are checked.
9766 * @param GCPtrMem The address of the guest memory.
9767 */
9768IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9769{
9770 /* The lazy approach for now... */
9771 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9772 if (GCPtrMem & 31)
9773 return iemRaiseGeneralProtectionFault0(pVCpu);
9774
9775 PCRTUINT256U pu256Src;
9776 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9777 if (rc == VINF_SUCCESS)
9778 {
9779 pu256Dst->au64[0] = pu256Src->au64[0];
9780 pu256Dst->au64[1] = pu256Src->au64[1];
9781 pu256Dst->au64[2] = pu256Src->au64[2];
9782 pu256Dst->au64[3] = pu256Src->au64[3];
9783 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9784 }
9785 return rc;
9786}
9787
9788
9789#ifdef IEM_WITH_SETJMP
9790/**
9791 * Fetches a data oword (octo word) at an aligned address, generally AVX
9792 * related, longjmp on error.
9793 *
9794 * Raises \#GP(0) if not aligned.
9795 *
9796 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9797 * @param   pu256Dst            Where to return the oword.
9798 * @param iSegReg The index of the segment register to use for
9799 * this access. The base and limits are checked.
9800 * @param GCPtrMem The address of the guest memory.
9801 */
9802DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9803{
9804 /* The lazy approach for now... */
9805 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9806 if ((GCPtrMem & 31) == 0)
9807 {
9808 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9809 pu256Dst->au64[0] = pu256Src->au64[0];
9810 pu256Dst->au64[1] = pu256Src->au64[1];
9811 pu256Dst->au64[2] = pu256Src->au64[2];
9812 pu256Dst->au64[3] = pu256Src->au64[3];
9813 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9814 return;
9815 }
9816
9817 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9818 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9819}
9820#endif
9821
9822
9823
9824/**
9825 * Fetches a descriptor register (lgdt, lidt).
9826 *
9827 * @returns Strict VBox status code.
9828 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9829 * @param pcbLimit Where to return the limit.
9830 * @param pGCPtrBase Where to return the base.
9831 * @param iSegReg The index of the segment register to use for
9832 * this access. The base and limits are checked.
9833 * @param GCPtrMem The address of the guest memory.
9834 * @param enmOpSize The effective operand size.
9835 */
9836IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9837 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9838{
9839 /*
9840 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9841 * little special:
9842 * - The two reads are done separately.
9843 *   - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9844 * - We suspect the 386 to actually commit the limit before the base in
9845 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9846 *     don't try to emulate this eccentric behavior, because it's not well
9847 * enough understood and rather hard to trigger.
9848 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9849 */
9850 VBOXSTRICTRC rcStrict;
9851 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9852 {
9853 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9854 if (rcStrict == VINF_SUCCESS)
9855 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9856 }
9857 else
9858 {
9859        uint32_t uTmp = 0; /* (silence a Visual C++ 'maybe used uninitialized' warning) */
9860 if (enmOpSize == IEMMODE_32BIT)
9861 {
9862 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9863 {
9864 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9865 if (rcStrict == VINF_SUCCESS)
9866 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9867 }
9868 else
9869 {
9870 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9871 if (rcStrict == VINF_SUCCESS)
9872 {
9873 *pcbLimit = (uint16_t)uTmp;
9874 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9875 }
9876 }
9877 if (rcStrict == VINF_SUCCESS)
9878 *pGCPtrBase = uTmp;
9879 }
9880 else
9881 {
9882 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9883 if (rcStrict == VINF_SUCCESS)
9884 {
9885 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9886 if (rcStrict == VINF_SUCCESS)
9887 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9888 }
9889 }
9890 }
9891 return rcStrict;
9892}
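/*
 * Illustrative memory layout for the LGDT/LIDT operand decoded above (the
 * structure and its name are made up for the example; the real code reads the
 * two fields separately, as described in the comment at the top of the function):
 */
#if 0 /* example only, never compiled */
#pragma pack(1)
typedef struct IEMEXAMPLEXDTROPERAND
{
    uint16_t cbLimit;   /* offset 0: always a 16-bit limit (the 486 does a dword read for 32-bit opsize) */
    uint64_t uBase;     /* offset 2: 24 bits used with 16-bit opsize, 32 bits with 32-bit opsize, 64 bits in long mode */
} IEMEXAMPLEXDTROPERAND;
#pragma pack()
#endif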
9893
9894
9895
9896/**
9897 * Stores a data byte.
9898 *
9899 * @returns Strict VBox status code.
9900 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9901 * @param iSegReg The index of the segment register to use for
9902 * this access. The base and limits are checked.
9903 * @param GCPtrMem The address of the guest memory.
9904 * @param u8Value The value to store.
9905 */
9906IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9907{
9908 /* The lazy approach for now... */
9909 uint8_t *pu8Dst;
9910 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9911 if (rc == VINF_SUCCESS)
9912 {
9913 *pu8Dst = u8Value;
9914 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9915 }
9916 return rc;
9917}
9918
9919
9920#ifdef IEM_WITH_SETJMP
9921/**
9922 * Stores a data byte, longjmp on error.
9923 *
9924 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9925 * @param iSegReg The index of the segment register to use for
9926 * this access. The base and limits are checked.
9927 * @param GCPtrMem The address of the guest memory.
9928 * @param u8Value The value to store.
9929 */
9930IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9931{
9932 /* The lazy approach for now... */
9933 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9934 *pu8Dst = u8Value;
9935 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9936}
9937#endif
9938
9939
9940/**
9941 * Stores a data word.
9942 *
9943 * @returns Strict VBox status code.
9944 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9945 * @param iSegReg The index of the segment register to use for
9946 * this access. The base and limits are checked.
9947 * @param GCPtrMem The address of the guest memory.
9948 * @param u16Value The value to store.
9949 */
9950IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9951{
9952 /* The lazy approach for now... */
9953 uint16_t *pu16Dst;
9954 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9955 if (rc == VINF_SUCCESS)
9956 {
9957 *pu16Dst = u16Value;
9958 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9959 }
9960 return rc;
9961}
9962
9963
9964#ifdef IEM_WITH_SETJMP
9965/**
9966 * Stores a data word, longjmp on error.
9967 *
9968 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9969 * @param iSegReg The index of the segment register to use for
9970 * this access. The base and limits are checked.
9971 * @param GCPtrMem The address of the guest memory.
9972 * @param u16Value The value to store.
9973 */
9974IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9975{
9976 /* The lazy approach for now... */
9977 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9978 *pu16Dst = u16Value;
9979 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9980}
9981#endif
9982
9983
9984/**
9985 * Stores a data dword.
9986 *
9987 * @returns Strict VBox status code.
9988 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9989 * @param iSegReg The index of the segment register to use for
9990 * this access. The base and limits are checked.
9991 * @param GCPtrMem The address of the guest memory.
9992 * @param u32Value The value to store.
9993 */
9994IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9995{
9996 /* The lazy approach for now... */
9997 uint32_t *pu32Dst;
9998 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9999 if (rc == VINF_SUCCESS)
10000 {
10001 *pu32Dst = u32Value;
10002 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10003 }
10004 return rc;
10005}
10006
10007
10008#ifdef IEM_WITH_SETJMP
10009/**
10010 * Stores a data dword, longjmp on error.
10011 *
10013 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10014 * @param iSegReg The index of the segment register to use for
10015 * this access. The base and limits are checked.
10016 * @param GCPtrMem The address of the guest memory.
10017 * @param u32Value The value to store.
10018 */
10019IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10020{
10021 /* The lazy approach for now... */
10022 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10023 *pu32Dst = u32Value;
10024 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10025}
10026#endif
10027
10028
10029/**
10030 * Stores a data qword.
10031 *
10032 * @returns Strict VBox status code.
10033 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10034 * @param iSegReg The index of the segment register to use for
10035 * this access. The base and limits are checked.
10036 * @param GCPtrMem The address of the guest memory.
10037 * @param u64Value The value to store.
10038 */
10039IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10040{
10041 /* The lazy approach for now... */
10042 uint64_t *pu64Dst;
10043 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10044 if (rc == VINF_SUCCESS)
10045 {
10046 *pu64Dst = u64Value;
10047 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10048 }
10049 return rc;
10050}
10051
10052
10053#ifdef IEM_WITH_SETJMP
10054/**
10055 * Stores a data qword, longjmp on error.
10056 *
10057 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10058 * @param iSegReg The index of the segment register to use for
10059 * this access. The base and limits are checked.
10060 * @param GCPtrMem The address of the guest memory.
10061 * @param u64Value The value to store.
10062 */
10063IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10064{
10065 /* The lazy approach for now... */
10066 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10067 *pu64Dst = u64Value;
10068 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10069}
10070#endif
10071
10072
10073/**
10074 * Stores a data dqword.
10075 *
10076 * @returns Strict VBox status code.
10077 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10078 * @param iSegReg The index of the segment register to use for
10079 * this access. The base and limits are checked.
10080 * @param GCPtrMem The address of the guest memory.
10081 * @param u128Value The value to store.
10082 */
10083IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10084{
10085 /* The lazy approach for now... */
10086 PRTUINT128U pu128Dst;
10087 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10088 if (rc == VINF_SUCCESS)
10089 {
10090 pu128Dst->au64[0] = u128Value.au64[0];
10091 pu128Dst->au64[1] = u128Value.au64[1];
10092 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10093 }
10094 return rc;
10095}
10096
10097
10098#ifdef IEM_WITH_SETJMP
10099/**
10100 * Stores a data dqword, longjmp on error.
10101 *
10102 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10103 * @param iSegReg The index of the segment register to use for
10104 * this access. The base and limits are checked.
10105 * @param GCPtrMem The address of the guest memory.
10106 * @param u128Value The value to store.
10107 */
10108IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10109{
10110 /* The lazy approach for now... */
10111 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10112 pu128Dst->au64[0] = u128Value.au64[0];
10113 pu128Dst->au64[1] = u128Value.au64[1];
10114 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10115}
10116#endif
10117
10118
10119/**
10120 * Stores a data dqword, SSE aligned.
10121 *
10122 * @returns Strict VBox status code.
10123 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10124 * @param iSegReg The index of the segment register to use for
10125 * this access. The base and limits are checked.
10126 * @param GCPtrMem The address of the guest memory.
10127 * @param u128Value The value to store.
10128 */
10129IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10130{
10131 /* The lazy approach for now... */
10132 if ( (GCPtrMem & 15)
10133 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10134 return iemRaiseGeneralProtectionFault0(pVCpu);
10135
10136 PRTUINT128U pu128Dst;
10137 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10138 if (rc == VINF_SUCCESS)
10139 {
10140 pu128Dst->au64[0] = u128Value.au64[0];
10141 pu128Dst->au64[1] = u128Value.au64[1];
10142 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10143 }
10144 return rc;
10145}
10146
10147
10148#ifdef IEM_WITH_SETJMP
10149/**
10150 * Stores a data dqword, SSE aligned, longjmp on error.
10151 *
10153 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10154 * @param iSegReg The index of the segment register to use for
10155 * this access. The base and limits are checked.
10156 * @param GCPtrMem The address of the guest memory.
10157 * @param u128Value The value to store.
10158 */
10159DECL_NO_INLINE(IEM_STATIC, void)
10160iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10161{
10162 /* The lazy approach for now... */
10163 if ( (GCPtrMem & 15) == 0
10164 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10165 {
10166 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10167 pu128Dst->au64[0] = u128Value.au64[0];
10168 pu128Dst->au64[1] = u128Value.au64[1];
10169 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10170 return;
10171 }
10172
10173 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10174 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10175}
10176#endif
10177
10178
10179/**
10180 * Stores a data oword (octo word).
10181 *
10182 * @returns Strict VBox status code.
10183 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10184 * @param iSegReg The index of the segment register to use for
10185 * this access. The base and limits are checked.
10186 * @param GCPtrMem The address of the guest memory.
10187 * @param pu256Value Pointer to the value to store.
10188 */
10189IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10190{
10191 /* The lazy approach for now... */
10192 PRTUINT256U pu256Dst;
10193 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10194 if (rc == VINF_SUCCESS)
10195 {
10196 pu256Dst->au64[0] = pu256Value->au64[0];
10197 pu256Dst->au64[1] = pu256Value->au64[1];
10198 pu256Dst->au64[2] = pu256Value->au64[2];
10199 pu256Dst->au64[3] = pu256Value->au64[3];
10200 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10201 }
10202 return rc;
10203}
10204
10205
10206#ifdef IEM_WITH_SETJMP
10207/**
10208 * Stores a data oword (octo word), longjmp on error.
10209 *
10210 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10211 * @param iSegReg The index of the segment register to use for
10212 * this access. The base and limits are checked.
10213 * @param GCPtrMem The address of the guest memory.
10214 * @param pu256Value Pointer to the value to store.
10215 */
10216IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10217{
10218 /* The lazy approach for now... */
10219 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10220 pu256Dst->au64[0] = pu256Value->au64[0];
10221 pu256Dst->au64[1] = pu256Value->au64[1];
10222 pu256Dst->au64[2] = pu256Value->au64[2];
10223 pu256Dst->au64[3] = pu256Value->au64[3];
10224 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10225}
10226#endif
10227
10228
10229/**
10230 * Stores a data oword (octo word), AVX aligned.
10231 *
10232 * @returns Strict VBox status code.
10233 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10234 * @param iSegReg The index of the segment register to use for
10235 * this access. The base and limits are checked.
10236 * @param GCPtrMem The address of the guest memory.
10237 * @param pu256Value Pointer to the value to store.
10238 */
10239IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10240{
10241 /* The lazy approach for now... */
10242 if (GCPtrMem & 31)
10243 return iemRaiseGeneralProtectionFault0(pVCpu);
10244
10245 PRTUINT256U pu256Dst;
10246 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10247 if (rc == VINF_SUCCESS)
10248 {
10249 pu256Dst->au64[0] = pu256Value->au64[0];
10250 pu256Dst->au64[1] = pu256Value->au64[1];
10251 pu256Dst->au64[2] = pu256Value->au64[2];
10252 pu256Dst->au64[3] = pu256Value->au64[3];
10253 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10254 }
10255 return rc;
10256}
10257
10258
10259#ifdef IEM_WITH_SETJMP
10260/**
10261 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10262 *
10264 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10265 * @param iSegReg The index of the segment register to use for
10266 * this access. The base and limits are checked.
10267 * @param GCPtrMem The address of the guest memory.
10268 * @param pu256Value Pointer to the value to store.
10269 */
10270DECL_NO_INLINE(IEM_STATIC, void)
10271iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10272{
10273 /* The lazy approach for now... */
10274 if ((GCPtrMem & 31) == 0)
10275 {
10276 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10277 pu256Dst->au64[0] = pu256Value->au64[0];
10278 pu256Dst->au64[1] = pu256Value->au64[1];
10279 pu256Dst->au64[2] = pu256Value->au64[2];
10280 pu256Dst->au64[3] = pu256Value->au64[3];
10281 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10282 return;
10283 }
10284
10285 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10286 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10287}
10288#endif
10289
10290
10291/**
10292 * Stores a descriptor register (sgdt, sidt).
10293 *
10294 * @returns Strict VBox status code.
10295 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10296 * @param cbLimit The limit.
10297 * @param GCPtrBase The base address.
10298 * @param iSegReg The index of the segment register to use for
10299 * this access. The base and limits are checked.
10300 * @param GCPtrMem The address of the guest memory.
10301 */
10302IEM_STATIC VBOXSTRICTRC
10303iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10304{
10305 /*
10306     * The SIDT and SGDT instructions actually store the data using two
10307     * independent writes.  The instructions do not respond to operand size prefixes.
10308 */
10309 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10310 if (rcStrict == VINF_SUCCESS)
10311 {
10312 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10313 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10314 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10315 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10316 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10317 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10318 else
10319 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10320 }
10321 return rcStrict;
10322}
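/*
 * Illustrative usage sketch for the helper above (iEffSeg and GCPtrEffDst are
 * hypothetical values supplied by the instruction decoder): SGDT/SIDT style
 * emulation hands the shadow descriptor-table register to this helper, which
 * then performs the two independent writes.
 */
#if 0 /* example only, never compiled */
    PCPUMCTX     pCtx     = IEM_GET_CTX(pVCpu);
    VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt, iEffSeg, GCPtrEffDst);
#endif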
10323
10324
10325/**
10326 * Pushes a word onto the stack.
10327 *
10328 * @returns Strict VBox status code.
10329 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10330 * @param u16Value The value to push.
10331 */
10332IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10333{
10334    /* Decrement the stack pointer. */
10335 uint64_t uNewRsp;
10336 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10337 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
10338
10339 /* Write the word the lazy way. */
10340 uint16_t *pu16Dst;
10341 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10342 if (rc == VINF_SUCCESS)
10343 {
10344 *pu16Dst = u16Value;
10345 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10346 }
10347
10348    /* Commit the new RSP value unless an access handler made trouble. */
10349 if (rc == VINF_SUCCESS)
10350 pCtx->rsp = uNewRsp;
10351
10352 return rc;
10353}
10354
10355
10356/**
10357 * Pushes a dword onto the stack.
10358 *
10359 * @returns Strict VBox status code.
10360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10361 * @param u32Value The value to push.
10362 */
10363IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10364{
10365    /* Decrement the stack pointer. */
10366 uint64_t uNewRsp;
10367 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10368 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10369
10370 /* Write the dword the lazy way. */
10371 uint32_t *pu32Dst;
10372 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10373 if (rc == VINF_SUCCESS)
10374 {
10375 *pu32Dst = u32Value;
10376 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10377 }
10378
10379    /* Commit the new RSP value unless an access handler made trouble. */
10380 if (rc == VINF_SUCCESS)
10381 pCtx->rsp = uNewRsp;
10382
10383 return rc;
10384}
10385
10386
10387/**
10388 * Pushes a dword segment register value onto the stack.
10389 *
10390 * @returns Strict VBox status code.
10391 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10392 * @param u32Value The value to push.
10393 */
10394IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10395{
10396    /* Decrement the stack pointer. */
10397 uint64_t uNewRsp;
10398 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10399 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10400
10401 VBOXSTRICTRC rc;
10402 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
10403 {
10404 /* The recompiler writes a full dword. */
10405 uint32_t *pu32Dst;
10406 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10407 if (rc == VINF_SUCCESS)
10408 {
10409 *pu32Dst = u32Value;
10410 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10411 }
10412 }
10413 else
10414 {
10415        /* The Intel docs talk about zero extending the selector register
10416           value.  My actual Intel CPU here might be zero extending the value,
10417           but it still only writes the lower word... */
10418        /** @todo Test this on new HW and on AMD and in 64-bit mode.  Also test what
10419         * happens when crossing an electric page boundary: is the high word checked
10420         * for write accessibility or not?  Probably it is.  What about segment limits?
10421         * It appears this behavior is also shared with trap error codes.
10422         *
10423         * Docs indicate the behavior may have changed with the Pentium or Pentium
10424         * Pro; check ancient hardware to pin down when it actually changed. */
10425 uint16_t *pu16Dst;
10426 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10427 if (rc == VINF_SUCCESS)
10428 {
10429 *pu16Dst = (uint16_t)u32Value;
10430 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10431 }
10432 }
10433
10434    /* Commit the new RSP value unless an access handler made trouble. */
10435 if (rc == VINF_SUCCESS)
10436 pCtx->rsp = uNewRsp;
10437
10438 return rc;
10439}
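/*
 * Worked example for the partial-write path above (the numbers are purely
 * illustrative): in 32-bit code with ESP=0x1000, pushing a segment selector
 * reserves four bytes, so ESP becomes 0x0ffc, yet only the word at SS:0x0ffc
 * is written; the bytes at SS:0x0ffe..0x0fff keep their previous contents.
 */
#if 0 /* example only, never compiled */
    VBOXSTRICTRC rcStrict = iemMemStackPushU32SReg(pVCpu, IEM_GET_CTX(pVCpu)->es.Sel);
#endif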
10440
10441
10442/**
10443 * Pushes a qword onto the stack.
10444 *
10445 * @returns Strict VBox status code.
10446 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10447 * @param u64Value The value to push.
10448 */
10449IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10450{
10451    /* Decrement the stack pointer. */
10452 uint64_t uNewRsp;
10453 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10454 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
10455
10456    /* Write the qword the lazy way. */
10457 uint64_t *pu64Dst;
10458 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10459 if (rc == VINF_SUCCESS)
10460 {
10461 *pu64Dst = u64Value;
10462 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10463 }
10464
10465    /* Commit the new RSP value unless an access handler made trouble. */
10466 if (rc == VINF_SUCCESS)
10467 pCtx->rsp = uNewRsp;
10468
10469 return rc;
10470}
10471
10472
10473/**
10474 * Pops a word from the stack.
10475 *
10476 * @returns Strict VBox status code.
10477 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10478 * @param pu16Value Where to store the popped value.
10479 */
10480IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10481{
10482 /* Increment the stack pointer. */
10483 uint64_t uNewRsp;
10484 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10485 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
10486
10487    /* Read the word the lazy way. */
10488 uint16_t const *pu16Src;
10489 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10490 if (rc == VINF_SUCCESS)
10491 {
10492 *pu16Value = *pu16Src;
10493 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10494
10495 /* Commit the new RSP value. */
10496 if (rc == VINF_SUCCESS)
10497 pCtx->rsp = uNewRsp;
10498 }
10499
10500 return rc;
10501}
10502
10503
10504/**
10505 * Pops a dword from the stack.
10506 *
10507 * @returns Strict VBox status code.
10508 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10509 * @param pu32Value Where to store the popped value.
10510 */
10511IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10512{
10513 /* Increment the stack pointer. */
10514 uint64_t uNewRsp;
10515 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10516 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
10517
10518    /* Read the dword the lazy way. */
10519 uint32_t const *pu32Src;
10520 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10521 if (rc == VINF_SUCCESS)
10522 {
10523 *pu32Value = *pu32Src;
10524 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10525
10526 /* Commit the new RSP value. */
10527 if (rc == VINF_SUCCESS)
10528 pCtx->rsp = uNewRsp;
10529 }
10530
10531 return rc;
10532}
10533
10534
10535/**
10536 * Pops a qword from the stack.
10537 *
10538 * @returns Strict VBox status code.
10539 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10540 * @param pu64Value Where to store the popped value.
10541 */
10542IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10543{
10544 /* Increment the stack pointer. */
10545 uint64_t uNewRsp;
10546 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10547 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
10548
10549    /* Read the qword the lazy way. */
10550 uint64_t const *pu64Src;
10551 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10552 if (rc == VINF_SUCCESS)
10553 {
10554 *pu64Value = *pu64Src;
10555 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10556
10557 /* Commit the new RSP value. */
10558 if (rc == VINF_SUCCESS)
10559 pCtx->rsp = uNewRsp;
10560 }
10561
10562 return rc;
10563}
10564
10565
10566/**
10567 * Pushes a word onto the stack, using a temporary stack pointer.
10568 *
10569 * @returns Strict VBox status code.
10570 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10571 * @param u16Value The value to push.
10572 * @param pTmpRsp Pointer to the temporary stack pointer.
10573 */
10574IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10575{
10576    /* Decrement the stack pointer. */
10577 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10578 RTUINT64U NewRsp = *pTmpRsp;
10579 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
10580
10581 /* Write the word the lazy way. */
10582 uint16_t *pu16Dst;
10583 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10584 if (rc == VINF_SUCCESS)
10585 {
10586 *pu16Dst = u16Value;
10587 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10588 }
10589
10590    /* Commit the new RSP value unless an access handler made trouble. */
10591 if (rc == VINF_SUCCESS)
10592 *pTmpRsp = NewRsp;
10593
10594 return rc;
10595}
10596
10597
10598/**
10599 * Pushes a dword onto the stack, using a temporary stack pointer.
10600 *
10601 * @returns Strict VBox status code.
10602 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10603 * @param u32Value The value to push.
10604 * @param pTmpRsp Pointer to the temporary stack pointer.
10605 */
10606IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10607{
10608    /* Decrement the stack pointer. */
10609 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10610 RTUINT64U NewRsp = *pTmpRsp;
10611 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
10612
10613    /* Write the dword the lazy way. */
10614 uint32_t *pu32Dst;
10615 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10616 if (rc == VINF_SUCCESS)
10617 {
10618 *pu32Dst = u32Value;
10619 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10620 }
10621
10622    /* Commit the new RSP value unless an access handler made trouble. */
10623 if (rc == VINF_SUCCESS)
10624 *pTmpRsp = NewRsp;
10625
10626 return rc;
10627}
10628
10629
10630/**
10631 * Pushes a qword onto the stack, using a temporary stack pointer.
10632 *
10633 * @returns Strict VBox status code.
10634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10635 * @param u64Value The value to push.
10636 * @param pTmpRsp Pointer to the temporary stack pointer.
10637 */
10638IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10639{
10640    /* Decrement the stack pointer. */
10641 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10642 RTUINT64U NewRsp = *pTmpRsp;
10643 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
10644
10645    /* Write the qword the lazy way. */
10646 uint64_t *pu64Dst;
10647 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10648 if (rc == VINF_SUCCESS)
10649 {
10650 *pu64Dst = u64Value;
10651 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10652 }
10653
10654    /* Commit the new RSP value unless an access handler made trouble. */
10655 if (rc == VINF_SUCCESS)
10656 *pTmpRsp = NewRsp;
10657
10658 return rc;
10659}
10660
10661
10662/**
10663 * Pops a word from the stack, using a temporary stack pointer.
10664 *
10665 * @returns Strict VBox status code.
10666 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10667 * @param pu16Value Where to store the popped value.
10668 * @param pTmpRsp Pointer to the temporary stack pointer.
10669 */
10670IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10671{
10672 /* Increment the stack pointer. */
10673 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10674 RTUINT64U NewRsp = *pTmpRsp;
10675 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
10676
10677    /* Read the word the lazy way. */
10678 uint16_t const *pu16Src;
10679 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10680 if (rc == VINF_SUCCESS)
10681 {
10682 *pu16Value = *pu16Src;
10683 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10684
10685 /* Commit the new RSP value. */
10686 if (rc == VINF_SUCCESS)
10687 *pTmpRsp = NewRsp;
10688 }
10689
10690 return rc;
10691}
10692
10693
10694/**
10695 * Pops a dword from the stack, using a temporary stack pointer.
10696 *
10697 * @returns Strict VBox status code.
10698 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10699 * @param pu32Value Where to store the popped value.
10700 * @param pTmpRsp Pointer to the temporary stack pointer.
10701 */
10702IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10703{
10704 /* Increment the stack pointer. */
10705 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10706 RTUINT64U NewRsp = *pTmpRsp;
10707 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
10708
10709    /* Read the dword the lazy way. */
10710 uint32_t const *pu32Src;
10711 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10712 if (rc == VINF_SUCCESS)
10713 {
10714 *pu32Value = *pu32Src;
10715 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10716
10717 /* Commit the new RSP value. */
10718 if (rc == VINF_SUCCESS)
10719 *pTmpRsp = NewRsp;
10720 }
10721
10722 return rc;
10723}
10724
10725
10726/**
10727 * Pops a qword from the stack, using a temporary stack pointer.
10728 *
10729 * @returns Strict VBox status code.
10730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10731 * @param pu64Value Where to store the popped value.
10732 * @param pTmpRsp Pointer to the temporary stack pointer.
10733 */
10734IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10735{
10736 /* Increment the stack pointer. */
10737 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10738 RTUINT64U NewRsp = *pTmpRsp;
10739 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10740
10741    /* Read the qword the lazy way. */
10742 uint64_t const *pu64Src;
10743 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10744 if (rcStrict == VINF_SUCCESS)
10745 {
10746 *pu64Value = *pu64Src;
10747 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10748
10749 /* Commit the new RSP value. */
10750 if (rcStrict == VINF_SUCCESS)
10751 *pTmpRsp = NewRsp;
10752 }
10753
10754 return rcStrict;
10755}
10756
10757
10758/**
10759 * Begin a special stack push (used by interrupts, exceptions and such).
10760 *
10761 * This will raise \#SS or \#PF if appropriate.
10762 *
10763 * @returns Strict VBox status code.
10764 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10765 * @param cbMem The number of bytes to push onto the stack.
10766 * @param ppvMem Where to return the pointer to the stack memory.
10767 * As with the other memory functions this could be
10768 * direct access or bounce buffered access, so
10769 *                      don't commit the register until the commit call
10770 * succeeds.
10771 * @param puNewRsp Where to return the new RSP value. This must be
10772 * passed unchanged to
10773 * iemMemStackPushCommitSpecial().
10774 */
10775IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10776{
10777 Assert(cbMem < UINT8_MAX);
10778 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10779 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10780 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10781}
10782
10783
10784/**
10785 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10786 *
10787 * This will update the rSP.
10788 *
10789 * @returns Strict VBox status code.
10790 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10791 * @param pvMem The pointer returned by
10792 * iemMemStackPushBeginSpecial().
10793 * @param uNewRsp The new RSP value returned by
10794 * iemMemStackPushBeginSpecial().
10795 */
10796IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10797{
10798 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10799 if (rcStrict == VINF_SUCCESS)
10800 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10801 return rcStrict;
10802}
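/*
 * Illustrative usage of the special push pair above (a minimal sketch modelled
 * on a real-mode style exception frame; uFlags, uCs and uIp stand for values
 * the caller has already computed):
 */
#if 0 /* example only, never compiled */
    uint16_t    *pu16Frame;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        pu16Frame[2] = uFlags;  /* highest address: pushed first */
        pu16Frame[1] = uCs;
        pu16Frame[0] = uIp;     /* lowest address: the new stack top */
        rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp); /* commits the bytes and RSP */
    }
#endif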
10803
10804
10805/**
10806 * Begin a special stack pop (used by iret, retf and such).
10807 *
10808 * This will raise \#SS or \#PF if appropriate.
10809 *
10810 * @returns Strict VBox status code.
10811 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10812 * @param cbMem The number of bytes to pop from the stack.
10813 * @param ppvMem Where to return the pointer to the stack memory.
10814 * @param puNewRsp Where to return the new RSP value. This must be
10815 * assigned to CPUMCTX::rsp manually some time
10816 * after iemMemStackPopDoneSpecial() has been
10817 * called.
10818 */
10819IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10820{
10821 Assert(cbMem < UINT8_MAX);
10822 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10823 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10824 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10825}
10826
10827
10828/**
10829 * Continue a special stack pop (used by iret and retf).
10830 *
10831 * This will raise \#SS or \#PF if appropriate.
10832 *
10833 * @returns Strict VBox status code.
10834 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10835 * @param cbMem The number of bytes to pop from the stack.
10836 * @param ppvMem Where to return the pointer to the stack memory.
10837 * @param puNewRsp Where to return the new RSP value. This must be
10838 * assigned to CPUMCTX::rsp manually some time
10839 * after iemMemStackPopDoneSpecial() has been
10840 * called.
10841 */
10842IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10843{
10844 Assert(cbMem < UINT8_MAX);
10845 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10846 RTUINT64U NewRsp;
10847 NewRsp.u = *puNewRsp;
10848 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10849 *puNewRsp = NewRsp.u;
10850 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10851}
10852
10853
10854/**
10855 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10856 * iemMemStackPopContinueSpecial).
10857 *
10858 * The caller will manually commit the rSP.
10859 *
10860 * @returns Strict VBox status code.
10861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10862 * @param pvMem The pointer returned by
10863 * iemMemStackPopBeginSpecial() or
10864 * iemMemStackPopContinueSpecial().
10865 */
10866IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10867{
10868 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10869}
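/*
 * Illustrative usage of the special pop routines above (a minimal sketch that
 * assumes a 16-bit IRET style frame of IP, CS and FLAGS; note that rSP is
 * committed manually after iemMemStackPopDoneSpecial succeeds):
 */
#if 0 /* example only, never compiled */
    uint16_t const *pu16Frame;
    uint64_t        uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, (void const **)&pu16Frame, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        uint16_t const uIp    = pu16Frame[0];
        uint16_t const uCs    = pu16Frame[1];
        uint16_t const uFlags = pu16Frame[2];
        rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu16Frame);
        if (rcStrict == VINF_SUCCESS)
            IEM_GET_CTX(pVCpu)->rsp = uNewRsp; /* the caller commits rSP itself */
        /* uIp, uCs and uFlags would then be loaded into the guest state. */
    }
#endif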
10870
10871
10872/**
10873 * Fetches a system table byte.
10874 *
10875 * @returns Strict VBox status code.
10876 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10877 * @param pbDst Where to return the byte.
10878 * @param iSegReg The index of the segment register to use for
10879 * this access. The base and limits are checked.
10880 * @param GCPtrMem The address of the guest memory.
10881 */
10882IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10883{
10884 /* The lazy approach for now... */
10885 uint8_t const *pbSrc;
10886 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10887 if (rc == VINF_SUCCESS)
10888 {
10889 *pbDst = *pbSrc;
10890 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10891 }
10892 return rc;
10893}
10894
10895
10896/**
10897 * Fetches a system table word.
10898 *
10899 * @returns Strict VBox status code.
10900 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10901 * @param pu16Dst Where to return the word.
10902 * @param iSegReg The index of the segment register to use for
10903 * this access. The base and limits are checked.
10904 * @param GCPtrMem The address of the guest memory.
10905 */
10906IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10907{
10908 /* The lazy approach for now... */
10909 uint16_t const *pu16Src;
10910 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10911 if (rc == VINF_SUCCESS)
10912 {
10913 *pu16Dst = *pu16Src;
10914 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10915 }
10916 return rc;
10917}
10918
10919
10920/**
10921 * Fetches a system table dword.
10922 *
10923 * @returns Strict VBox status code.
10924 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10925 * @param pu32Dst Where to return the dword.
10926 * @param iSegReg The index of the segment register to use for
10927 * this access. The base and limits are checked.
10928 * @param GCPtrMem The address of the guest memory.
10929 */
10930IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10931{
10932 /* The lazy approach for now... */
10933 uint32_t const *pu32Src;
10934 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10935 if (rc == VINF_SUCCESS)
10936 {
10937 *pu32Dst = *pu32Src;
10938 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10939 }
10940 return rc;
10941}
10942
10943
10944/**
10945 * Fetches a system table qword.
10946 *
10947 * @returns Strict VBox status code.
10948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10949 * @param pu64Dst Where to return the qword.
10950 * @param iSegReg The index of the segment register to use for
10951 * this access. The base and limits are checked.
10952 * @param GCPtrMem The address of the guest memory.
10953 */
10954IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10955{
10956 /* The lazy approach for now... */
10957 uint64_t const *pu64Src;
10958 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10959 if (rc == VINF_SUCCESS)
10960 {
10961 *pu64Dst = *pu64Src;
10962 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10963 }
10964 return rc;
10965}
10966
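/*
 * The four fetchers above all follow the same map/copy/unmap pattern with
 * IEM_ACCESS_SYS_R.  A hedged sketch of a direct use (illustration only;
 * iemMemFetchSelDescWithErr below is the real consumer): passing UINT8_MAX
 * as the segment index requests a flat/linear address, so a GDT entry can
 * be read straight off its base:
 *
 *     uint64_t     u64Desc;
 *     RTGCPTR      GCPtrEntry = pCtx->gdtr.pGdt + (uSel & X86_SEL_MASK);
 *     VBOXSTRICTRC rcStrict   = iemMemFetchSysU64(pVCpu, &u64Desc, UINT8_MAX, GCPtrEntry);
 */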
10967
10968/**
10969 * Fetches a descriptor table entry with caller specified error code.
10970 *
10971 * @returns Strict VBox status code.
10972 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10973 * @param pDesc Where to return the descriptor table entry.
10974 * @param uSel The selector which table entry to fetch.
10975 * @param uXcpt The exception to raise on table lookup error.
10976 * @param uErrorCode The error code associated with the exception.
10977 */
10978IEM_STATIC VBOXSTRICTRC
10979iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10980{
10981 AssertPtr(pDesc);
10982 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10983
10984 /** @todo did the 286 require all 8 bytes to be accessible? */
10985 /*
10986 * Get the selector table base and check bounds.
10987 */
10988 RTGCPTR GCPtrBase;
10989 if (uSel & X86_SEL_LDT)
10990 {
10991 if ( !pCtx->ldtr.Attr.n.u1Present
10992 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10993 {
10994 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10995 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10996 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10997 uErrorCode, 0);
10998 }
10999
11000 Assert(pCtx->ldtr.Attr.n.u1Present);
11001 GCPtrBase = pCtx->ldtr.u64Base;
11002 }
11003 else
11004 {
11005 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
11006 {
11007 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
11008 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
11009 uErrorCode, 0);
11010 }
11011 GCPtrBase = pCtx->gdtr.pGdt;
11012 }
11013
11014 /*
11015 * Read the legacy descriptor and maybe the long mode extensions if
11016 * required.
11017 */
11018 VBOXSTRICTRC rcStrict;
11019 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
11020 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
11021 else
11022 {
11023 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
11024 if (rcStrict == VINF_SUCCESS)
11025 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
11026 if (rcStrict == VINF_SUCCESS)
11027 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
11028 if (rcStrict == VINF_SUCCESS)
11029 pDesc->Legacy.au16[3] = 0;
11030 else
11031 return rcStrict;
11032 }
11033
11034 if (rcStrict == VINF_SUCCESS)
11035 {
11036 if ( !IEM_IS_LONG_MODE(pVCpu)
11037 || pDesc->Legacy.Gen.u1DescType)
11038 pDesc->Long.au64[1] = 0;
11039 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
11040 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
11041 else
11042 {
11043 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
11044 /** @todo is this the right exception? */
11045 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
11046 }
11047 }
11048 return rcStrict;
11049}
11050
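/*
 * Worked example for the offset math above (assumed selector value, for
 * illustration only): uSel = 0x002b gives RPL=3 and TI=0 (GDT), so the
 * bounds check uses (uSel | X86_SEL_RPL_LDT) = 0x002f against gdtr.cbGdt and
 * the legacy descriptor is read from gdtr.pGdt + (uSel & X86_SEL_MASK), i.e.
 * offset 0x28.  For a 16-byte long mode system descriptor the second qword
 * comes from (uSel | X86_SEL_RPL_LDT) + 1 = 0x30, which is simply offset + 8.
 */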
11051
11052/**
11053 * Fetches a descriptor table entry.
11054 *
11055 * @returns Strict VBox status code.
11056 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11057 * @param pDesc Where to return the descriptor table entry.
11058 * @param uSel The selector which table entry to fetch.
11059 * @param uXcpt The exception to raise on table lookup error.
11060 */
11061IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11062{
11063 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11064}
11065
11066
11067/**
11068 * Fakes a long mode stack selector for SS = 0.
11069 *
11070 * @param pDescSs Where to return the fake stack descriptor.
11071 * @param uDpl The DPL we want.
11072 */
11073IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11074{
11075 pDescSs->Long.au64[0] = 0;
11076 pDescSs->Long.au64[1] = 0;
11077 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11078 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11079 pDescSs->Long.Gen.u2Dpl = uDpl;
11080 pDescSs->Long.Gen.u1Present = 1;
11081 pDescSs->Long.Gen.u1Long = 1;
11082}
11083
11084
11085/**
11086 * Marks the selector descriptor as accessed (only non-system descriptors).
11087 *
11088 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11089 * will therefore skip the limit checks.
11090 *
11091 * @returns Strict VBox status code.
11092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11093 * @param uSel The selector.
11094 */
11095IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11096{
11097 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11098
11099 /*
11100 * Get the selector table base and calculate the entry address.
11101 */
11102 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11103 ? pCtx->ldtr.u64Base
11104 : pCtx->gdtr.pGdt;
11105 GCPtr += uSel & X86_SEL_MASK;
11106
11107 /*
11108 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11109 * ugly stuff to avoid that. This also makes sure the access is atomic and
11110 * more or less removes any question about 8-bit vs. 32-bit accesses.
11111 */
11112 VBOXSTRICTRC rcStrict;
11113 uint32_t volatile *pu32;
11114 if ((GCPtr & 3) == 0)
11115 {
11116 /* The normal case: map the 32 bits around the accessed bit (bit 40). */
11117 GCPtr += 2 + 2;
11118 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11119 if (rcStrict != VINF_SUCCESS)
11120 return rcStrict;
11121 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11122 }
11123 else
11124 {
11125 /* The misaligned GDT/LDT case, map the whole thing. */
11126 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11127 if (rcStrict != VINF_SUCCESS)
11128 return rcStrict;
11129 switch ((uintptr_t)pu32 & 3)
11130 {
11131 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11132 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11133 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11134 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11135 }
11136 }
11137
11138 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11139}
11140
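/*
 * Bit position sanity check for the above (illustration only): the accessed
 * bit is bit 0 of the type field in descriptor byte 5, i.e. bit 40 of the
 * 8-byte descriptor.  In the aligned case the dword at offset 4 is mapped
 * (GCPtr += 2 + 2) and bit 8 of it is set, since 32 + 8 = 40; the switch in
 * the misaligned case compensates for the mapping's own misalignment so the
 * same architectural bit 40 ends up being set.
 */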
11141/** @} */
11142
11143
11144/*
11145 * Include the C/C++ implementation of instruction.
11146 */
11147#include "IEMAllCImpl.cpp.h"
11148
11149
11150
11151/** @name "Microcode" macros.
11152 *
11153 * The idea is that we should be able to use the same code to interpret
11154 * instructions as well as to feed a recompiler, hence this obfuscation.
11155 *
11156 * @{
11157 */
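/*
 * Minimal sketch of how these macros compose inside an instruction decoder
 * body; the concrete macro mix below is illustrative rather than copied from
 * a specific instruction:
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *     IEM_MC_PUSH_U16(u16Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */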
11158#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11159#define IEM_MC_END() }
11160#define IEM_MC_PAUSE() do {} while (0)
11161#define IEM_MC_CONTINUE() do {} while (0)
11162
11163/** Internal macro. */
11164#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11165 do \
11166 { \
11167 VBOXSTRICTRC rcStrict2 = a_Expr; \
11168 if (rcStrict2 != VINF_SUCCESS) \
11169 return rcStrict2; \
11170 } while (0)
11171
11172
11173#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11174#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11175#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11176#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11177#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11178#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11179#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11180#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11181#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11182 do { \
11183 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11184 return iemRaiseDeviceNotAvailable(pVCpu); \
11185 } while (0)
11186#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11187 do { \
11188 if (((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11189 return iemRaiseDeviceNotAvailable(pVCpu); \
11190 } while (0)
11191#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11192 do { \
11193 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11194 return iemRaiseMathFault(pVCpu); \
11195 } while (0)
11196#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11197 do { \
11198 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11199 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11200 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11201 return iemRaiseUndefinedOpcode(pVCpu); \
11202 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11203 return iemRaiseDeviceNotAvailable(pVCpu); \
11204 } while (0)
11205#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11206 do { \
11207 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11208 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11209 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11210 return iemRaiseUndefinedOpcode(pVCpu); \
11211 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11212 return iemRaiseDeviceNotAvailable(pVCpu); \
11213 } while (0)
11214#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11215 do { \
11216 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11217 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11218 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11219 return iemRaiseUndefinedOpcode(pVCpu); \
11220 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11221 return iemRaiseDeviceNotAvailable(pVCpu); \
11222 } while (0)
11223#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11224 do { \
11225 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11226 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11227 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11228 return iemRaiseUndefinedOpcode(pVCpu); \
11229 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11230 return iemRaiseDeviceNotAvailable(pVCpu); \
11231 } while (0)
11232#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11233 do { \
11234 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11235 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11236 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11237 return iemRaiseUndefinedOpcode(pVCpu); \
11238 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11239 return iemRaiseDeviceNotAvailable(pVCpu); \
11240 } while (0)
11241#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11242 do { \
11243 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11244 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11245 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11246 return iemRaiseUndefinedOpcode(pVCpu); \
11247 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11248 return iemRaiseDeviceNotAvailable(pVCpu); \
11249 } while (0)
11250#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11251 do { \
11252 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11253 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11254 return iemRaiseUndefinedOpcode(pVCpu); \
11255 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11256 return iemRaiseDeviceNotAvailable(pVCpu); \
11257 } while (0)
11258#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11259 do { \
11260 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11261 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11262 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11263 return iemRaiseUndefinedOpcode(pVCpu); \
11264 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11265 return iemRaiseDeviceNotAvailable(pVCpu); \
11266 } while (0)
11267#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11268 do { \
11269 if (pVCpu->iem.s.uCpl != 0) \
11270 return iemRaiseGeneralProtectionFault0(pVCpu); \
11271 } while (0)
11272#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11273 do { \
11274 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11275 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11276 } while (0)
11277#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11278 do { \
11279 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11280 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11281 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_FSGSBASE)) \
11282 return iemRaiseUndefinedOpcode(pVCpu); \
11283 } while (0)
11284#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11285 do { \
11286 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11287 return iemRaiseGeneralProtectionFault0(pVCpu); \
11288 } while (0)
11289
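/*
 * By convention the raise-style checks above come first in an instruction
 * body, before any guest state is modified.  A hedged sketch for an SSE2
 * instruction (illustration only, details elided):
 *
 *     IEM_MC_BEGIN(2, 0);
 *     IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
 *     // ... declare arguments, fetch operands, call the worker ...
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */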
11290
11291#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11292#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11293#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11294#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11295#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11296#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11297#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11298 uint32_t a_Name; \
11299 uint32_t *a_pName = &a_Name
11300#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11301 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
11302
11303#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11304#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11305
11306#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11307#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11308#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11309#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11310#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11311#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11312#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11313#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11314#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11315#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11316#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11317#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11318#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11319#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11320#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11321#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11322#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11323#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11324#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11325#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11326#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg));
11327#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg));
11328#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11329#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11330#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11331#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11332#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11333#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11334#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11335#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11336#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11337/** @note Not for IOPL or IF testing or modification. */
11338#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11339#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11340#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
11341#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
11342
11343#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11344#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11345#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11346#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11347#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11348#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11349#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11350#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11351#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11352#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11353#define IEM_MC_STORE_SREG_BASE_U64(a_iSeg, a_u64Value) *iemSRegBaseRefU64(pVCpu, (a_iSeg)) = (a_u64Value)
11354#define IEM_MC_STORE_SREG_BASE_U32(a_iSeg, a_u32Value) *iemSRegBaseRefU64(pVCpu, (a_iSeg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11355#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11356 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11357
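/*
 * Note on the store macros above: IEM_MC_STORE_GREG_U32 deliberately goes
 * through the 64-bit register reference so that, as in 64-bit mode on real
 * hardware, a 32-bit write zero-extends into the upper half of the GPR.
 * Quick illustration (assumed register state):
 *
 *     // rax == 0xdeadbeef00000000 beforehand
 *     IEM_MC_STORE_GREG_U32(X86_GREG_xAX, UINT32_C(0x12345678));
 *     // rax == 0x0000000012345678 afterwards, not 0xdeadbeef12345678
 */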
11358
11359#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11360#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11361/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11362 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11363#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11364#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11365/** @note Not for IOPL or IF testing or modification. */
11366#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11367
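/*
 * Sketch of the pattern the @todo above refers to (illustrative): after a
 * worker has updated a dword obtained via IEM_MC_REF_GREG_U32, the caller
 * clears the upper half explicitly:
 *
 *     IEM_MC_REF_GREG_U32(pu32Dst, iGReg);   // iGReg: decoded register index (placeholder)
 *     // ... worker updates *pu32Dst ...
 *     IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
 */
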
11368#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11369#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11370#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11371 do { \
11372 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11373 *pu32Reg += (a_u32Value); \
11374 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11375 } while (0)
11376#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11377
11378#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11379#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11380#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11381 do { \
11382 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11383 *pu32Reg -= (a_u32Value); \
11384 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11385 } while (0)
11386#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11387#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11388
11389#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11390#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11391#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11392#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11393#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11394#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11395#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11396
11397#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11398#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11399#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11400#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11401
11402#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11403#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11404#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11405
11406#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11407#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11408#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11409
11410#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11411#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11412#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11413
11414#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11415#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11416#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11417
11418#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11419
11420#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11421
11422#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11423#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11424#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11425 do { \
11426 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11427 *pu32Reg &= (a_u32Value); \
11428 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11429 } while (0)
11430#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11431
11432#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11433#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11434#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11435 do { \
11436 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11437 *pu32Reg |= (a_u32Value); \
11438 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11439 } while (0)
11440#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11441
11442
11443/** @note Not for IOPL or IF modification. */
11444#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
11445/** @note Not for IOPL or IF modification. */
11446#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
11447/** @note Not for IOPL or IF modification. */
11448#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
11449
11450#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11451
11452/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11453#define IEM_MC_FPU_TO_MMX_MODE() do { \
11454 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11455 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0xff; \
11456 } while (0)
11457
11458/** Switches the FPU state from MMX mode (FTW=0xffff). */
11459#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11460 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0; \
11461 } while (0)
11462
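/*
 * Note on the two macros above: x87.FTW here holds the abridged (FXSAVE
 * style) tag word, so 0xff marks all eight registers valid when entering
 * MMX mode and 0 marks them all empty when leaving it.  The full 16-bit tag
 * word values mentioned in the comments (0 and 0xffff) express the same
 * states in the unabridged encoding.
 */
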
11463#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11464 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11465#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11466 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11467#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11468 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11469 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11470 } while (0)
11471#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11472 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11473 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11474 } while (0)
11475#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11476 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11477#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11478 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11479#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11480 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11481
11482#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11483 do { (a_u128Value).au64[0] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11484 (a_u128Value).au64[1] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11485 } while (0)
11486#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11487 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11488#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11489 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11490#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11491 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11492#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11493 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11494 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11495 } while (0)
11496#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11497 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11498#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11499 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11500 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11501 } while (0)
11502#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11503 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11504#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11505 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11506 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11507 } while (0)
11508#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11509 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11510#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11511 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11512#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11513 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11514#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11515 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11516#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11517 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11518 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11519 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11520 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11521 } while (0)
11522
11523#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11524 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11525 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11526 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11527 } while (0)
11528#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11529 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11530 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11531 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11532 } while (0)
11533#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11534 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11535 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11536 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11537 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11538 } while (0)
11539#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11540 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11541 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11542 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11543 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11544 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11545 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11546 } while (0)
11547
11548#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11549#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11550 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11551 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11552 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11553 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11554 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11555 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11556 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11557 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11558 } while (0)
11559#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11560 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11561 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11562 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11563 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11564 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11565 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11566 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11567 } while (0)
11568#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11569 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11570 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11571 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11572 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11573 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11574 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11575 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11576 } while (0)
11577#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11578 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11579 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11580 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11581 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11582 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11583 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11584 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11585 } while (0)
11586
11587#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11588 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11589#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11590 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11591#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11592 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11593#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11594 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11595 uintptr_t const iYRegTmp = (a_iYReg); \
11596 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11597 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11598 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11599 } while (0)
11600
11601#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11602 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11603 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11604 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11605 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11606 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11607 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11608 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11609 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11610 } while (0)
11611#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11612 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11613 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11614 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11615 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11616 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11617 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11618 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11619 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11620 } while (0)
11621#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11622 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11623 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11624 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11625 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11626 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11627 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11628 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11629 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11630 } while (0)
11631
11632#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11633 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11634 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11635 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11636 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11637 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11638 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11639 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11640 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11641 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11642 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11643 } while (0)
11644#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11645 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11646 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11647 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11648 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11649 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11650 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11651 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11652 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11653 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11654 } while (0)
11655#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11656 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11657 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11658 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11659 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11660 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11661 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11662 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11663 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11664 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11665 } while (0)
11666#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11667 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11668 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11669 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11670 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11671 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11672 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11673 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11674 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11675 } while (0)
11676
11677#ifndef IEM_WITH_SETJMP
11678# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11679 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11680# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11681 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11682# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11683 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11684#else
11685# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11686 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11687# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11688 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11689# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11690 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11691#endif
11692
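/*
 * The two variants above (and in the groups that follow) reflect the
 * IEM_WITH_SETJMP build mode: without it each accessor returns a
 * VBOXSTRICTRC which IEM_MC_RETURN_ON_FAILURE propagates, with it the *Jmp
 * accessors longjmp out of the instruction on failure and just return the
 * value.  Rough expansion of the same fetch in both modes (illustrative):
 *
 *     // !defined(IEM_WITH_SETJMP):
 *     do { VBOXSTRICTRC rcStrict2 = iemMemFetchDataU8(pVCpu, &u8Dst, iSeg, GCPtrMem);
 *          if (rcStrict2 != VINF_SUCCESS) return rcStrict2; } while (0);
 *     // defined(IEM_WITH_SETJMP):
 *     u8Dst = iemMemFetchDataU8Jmp(pVCpu, iSeg, GCPtrMem);
 */
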
11693#ifndef IEM_WITH_SETJMP
11694# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11695 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11696# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11697 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11698# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11699 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11700#else
11701# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11702 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11703# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11704 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11705# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11706 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11707#endif
11708
11709#ifndef IEM_WITH_SETJMP
11710# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11711 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11712# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11713 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11714# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11715 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11716#else
11717# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11718 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11719# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11720 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11721# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11722 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11723#endif
11724
11725#ifdef SOME_UNUSED_FUNCTION
11726# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11727 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11728#endif
11729
11730#ifndef IEM_WITH_SETJMP
11731# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11732 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11733# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11734 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11735# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11736 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11737# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11738 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11739#else
11740# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11741 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11742# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11743 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11744# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11745 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11746# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11747 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11748#endif
11749
11750#ifndef IEM_WITH_SETJMP
11751# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11752 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11753# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11754 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11755# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11756 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11757#else
11758# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11759 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11760# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11761 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11762# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11763 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11764#endif
11765
11766#ifndef IEM_WITH_SETJMP
11767# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11768 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11769# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11770 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11771#else
11772# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11773 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11774# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11775 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11776#endif
11777
11778#ifndef IEM_WITH_SETJMP
11779# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11780 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11781# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11782 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11783#else
11784# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11785 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11786# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11787 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11788#endif
11789
11790
11791
11792#ifndef IEM_WITH_SETJMP
11793# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11794 do { \
11795 uint8_t u8Tmp; \
11796 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11797 (a_u16Dst) = u8Tmp; \
11798 } while (0)
11799# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11800 do { \
11801 uint8_t u8Tmp; \
11802 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11803 (a_u32Dst) = u8Tmp; \
11804 } while (0)
11805# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11806 do { \
11807 uint8_t u8Tmp; \
11808 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11809 (a_u64Dst) = u8Tmp; \
11810 } while (0)
11811# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11812 do { \
11813 uint16_t u16Tmp; \
11814 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11815 (a_u32Dst) = u16Tmp; \
11816 } while (0)
11817# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11818 do { \
11819 uint16_t u16Tmp; \
11820 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11821 (a_u64Dst) = u16Tmp; \
11822 } while (0)
11823# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11824 do { \
11825 uint32_t u32Tmp; \
11826 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11827 (a_u64Dst) = u32Tmp; \
11828 } while (0)
11829#else /* IEM_WITH_SETJMP */
11830# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11831 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11832# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11833 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11834# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11835 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11836# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11837 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11838# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11839 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11840# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11841 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11842#endif /* IEM_WITH_SETJMP */
11843
11844#ifndef IEM_WITH_SETJMP
11845# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11846 do { \
11847 uint8_t u8Tmp; \
11848 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11849 (a_u16Dst) = (int8_t)u8Tmp; \
11850 } while (0)
11851# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11852 do { \
11853 uint8_t u8Tmp; \
11854 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11855 (a_u32Dst) = (int8_t)u8Tmp; \
11856 } while (0)
11857# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11858 do { \
11859 uint8_t u8Tmp; \
11860 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11861 (a_u64Dst) = (int8_t)u8Tmp; \
11862 } while (0)
11863# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11864 do { \
11865 uint16_t u16Tmp; \
11866 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11867 (a_u32Dst) = (int16_t)u16Tmp; \
11868 } while (0)
11869# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11870 do { \
11871 uint16_t u16Tmp; \
11872 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11873 (a_u64Dst) = (int16_t)u16Tmp; \
11874 } while (0)
11875# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11876 do { \
11877 uint32_t u32Tmp; \
11878 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11879 (a_u64Dst) = (int32_t)u32Tmp; \
11880 } while (0)
11881#else /* IEM_WITH_SETJMP */
11882# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11883 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11884# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11885 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11886# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11887 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11888# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11889 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11890# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11891 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11892# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11893 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11894#endif /* IEM_WITH_SETJMP */
11895
11896#ifndef IEM_WITH_SETJMP
11897# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11898 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11899# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11900 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11901# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11902 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11903# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11904 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11905#else
11906# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11907 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11908# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11909 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11910# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11911 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11912# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11913 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11914#endif
11915
11916#ifndef IEM_WITH_SETJMP
11917# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11918 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11919# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11920 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11921# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11922 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11923# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11924 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11925#else
11926# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11927 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11928# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11929 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11930# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11931 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11932# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11933 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11934#endif
11935
11936#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11937#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11938#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11939#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11940#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11941#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11942#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11943 do { \
11944 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11945 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11946 } while (0)
11947
11948#ifndef IEM_WITH_SETJMP
11949# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11950 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11951# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11952 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11953#else
11954# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11955 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11956# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11957 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11958#endif
11959
11960#ifndef IEM_WITH_SETJMP
11961# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11962 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11963# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11964 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11965#else
11966# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11967 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11968# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11969 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11970#endif
11971
11972
11973#define IEM_MC_PUSH_U16(a_u16Value) \
11974 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11975#define IEM_MC_PUSH_U32(a_u32Value) \
11976 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11977#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11978 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11979#define IEM_MC_PUSH_U64(a_u64Value) \
11980 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11981
11982#define IEM_MC_POP_U16(a_pu16Value) \
11983 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11984#define IEM_MC_POP_U32(a_pu32Value) \
11985 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11986#define IEM_MC_POP_U64(a_pu64Value) \
11987 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
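/* Minimal usage sketch (not lifted from the opcode tables) of the push macro
 * above, assuming the IEM_MC_BEGIN/IEM_MC_END block macros, IEM_MC_LOCAL,
 * IEM_MC_FETCH_GREG_U16 and IEM_MC_ADVANCE_RIP defined elsewhere in this file:
 *
 *   IEM_MC_BEGIN(0, 1);
 *   IEM_MC_LOCAL(uint16_t, u16Value);
 *   IEM_MC_FETCH_GREG_U16(u16Value, iReg);   // iReg: register index decoded by the caller
 *   IEM_MC_PUSH_U16(u16Value);               // may return on a guest memory fault
 *   IEM_MC_ADVANCE_RIP();
 *   IEM_MC_END();
 */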
11988
11989/** Maps guest memory for direct or bounce buffered access.
11990 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11991 * @remarks May return.
11992 */
11993#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11994 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11995
11996/** Maps guest memory for direct or bounce buffered access.
11997 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11998 * @remarks May return.
11999 */
12000#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
12001 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
12002
12003/** Commits the memory and unmaps the guest memory.
12004 * @remarks May return.
12005 */
12006#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
12007 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
12008
12009/** Commits the memory and unmaps the guest memory unless the FPU status word
12010 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
12011 * that would cause FLD not to store.
12012 *
12013 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
12014 * store, while \#P will not.
12015 *
12016 * @remarks May in theory return - for now.
12017 */
12018#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
12019 do { \
12020 if ( !(a_u16FSW & X86_FSW_ES) \
12021 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
12022 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
12023 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
12024 } while (0)
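/* The commit condition above spelled out as a stand-alone predicate - a sketch
 * only, with a hypothetical helper name; it mirrors the macro bit for bit:
 *
 *   static bool iemSketchFpuStoreCommits(uint16_t fFsw, uint16_t fFcw)
 *   {
 *       // Skip the commit only when the summary bit is set AND one of #U, #O
 *       // or #I in the new status word is unmasked in the control word.
 *       return !(fFsw & X86_FSW_ES)
 *           || !((fFsw & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) & ~(fFcw & X86_FCW_MASK_ALL));
 *   }
 */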
12025
12026/** Calculates the effective address from R/M. */
12027#ifndef IEM_WITH_SETJMP
12028# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12029 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
12030#else
12031# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12032 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
12033#endif
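/* Typical decoder usage (a sketch, not taken from the opcode tables): the
 * effective address is calculated once and then fed to the memory accessors,
 * e.g. for a 16-bit memory source operand:
 *
 *   IEM_MC_BEGIN(0, 2);
 *   IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);
 *   IEM_MC_LOCAL(uint16_t, u16Value);
 *   IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);   // 0 = no immediate follows the ModR/M bytes
 *   IEM_MC_FETCH_MEM_U16(u16Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *   ...
 *   IEM_MC_END();
 *
 * IEM_MC_BEGIN/IEM_MC_END, IEM_MC_LOCAL and IEM_MC_FETCH_MEM_U16 are assumed
 * from the surrounding file; bRm is the ModR/M byte fetched by the caller.
 */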
12034
12035#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
12036#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
12037#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
12038#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
12039#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
12040#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
12041#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
12042
12043/**
12044 * Defers the rest of the instruction emulation to a C implementation routine
12045 * and returns, only taking the standard parameters.
12046 *
12047 * @param a_pfnCImpl The pointer to the C routine.
12048 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12049 */
12050#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12051
12052/**
12053 * Defers the rest of instruction emulation to a C implementation routine and
12054 * returns, taking one argument in addition to the standard ones.
12055 *
12056 * @param a_pfnCImpl The pointer to the C routine.
12057 * @param a0 The argument.
12058 */
12059#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12060
12061/**
12062 * Defers the rest of the instruction emulation to a C implementation routine
12063 * and returns, taking two arguments in addition to the standard ones.
12064 *
12065 * @param a_pfnCImpl The pointer to the C routine.
12066 * @param a0 The first extra argument.
12067 * @param a1 The second extra argument.
12068 */
12069#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12070
12071/**
12072 * Defers the rest of the instruction emulation to a C implementation routine
12073 * and returns, taking three arguments in addition to the standard ones.
12074 *
12075 * @param a_pfnCImpl The pointer to the C routine.
12076 * @param a0 The first extra argument.
12077 * @param a1 The second extra argument.
12078 * @param a2 The third extra argument.
12079 */
12080#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12081
12082/**
12083 * Defers the rest of the instruction emulation to a C implementation routine
12084 * and returns, taking four arguments in addition to the standard ones.
12085 *
12086 * @param a_pfnCImpl The pointer to the C routine.
12087 * @param a0 The first extra argument.
12088 * @param a1 The second extra argument.
12089 * @param a2 The third extra argument.
12090 * @param a3 The fourth extra argument.
12091 */
12092#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12093
12094/**
12095 * Defers the rest of the instruction emulation to a C implementation routine
12096 * and returns, taking five arguments in addition to the standard ones.
12097 *
12098 * @param a_pfnCImpl The pointer to the C routine.
12099 * @param a0 The first extra argument.
12100 * @param a1 The second extra argument.
12101 * @param a2 The third extra argument.
12102 * @param a3 The fourth extra argument.
12103 * @param a4 The fifth extra argument.
12104 */
12105#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
12106
12107/**
12108 * Defers the entire instruction emulation to a C implementation routine and
12109 * returns, only taking the standard parameters.
12110 *
12111 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12112 *
12113 * @param a_pfnCImpl The pointer to the C routine.
12114 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12115 */
12116#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12117
12118/**
12119 * Defers the entire instruction emulation to a C implementation routine and
12120 * returns, taking one argument in addition to the standard ones.
12121 *
12122 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12123 *
12124 * @param a_pfnCImpl The pointer to the C routine.
12125 * @param a0 The argument.
12126 */
12127#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12128
12129/**
12130 * Defers the entire instruction emulation to a C implementation routine and
12131 * returns, taking two arguments in addition to the standard ones.
12132 *
12133 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12134 *
12135 * @param a_pfnCImpl The pointer to the C routine.
12136 * @param a0 The first extra argument.
12137 * @param a1 The second extra argument.
12138 */
12139#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12140
12141/**
12142 * Defers the entire instruction emulation to a C implementation routine and
12143 * returns, taking three arguments in addition to the standard ones.
12144 *
12145 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12146 *
12147 * @param a_pfnCImpl The pointer to the C routine.
12148 * @param a0 The first extra argument.
12149 * @param a1 The second extra argument.
12150 * @param a2 The third extra argument.
12151 */
12152#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
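/* Usage sketch (a hypothetical handler, not one from the opcode tables):
 * instructions implemented entirely in C simply defer, with no
 * IEM_MC_BEGIN/IEM_MC_END block around the call:
 *
 *   FNIEMOP_DEF(iemOp_ExampleDefer)                   // hypothetical name
 *   {
 *       IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();     // defined further below
 *       return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt); // assuming the HLT C worker from IEMAllCImpl
 *   }
 */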
12153
12154/**
12155 * Calls a FPU assembly implementation taking one visible argument.
12156 *
12157 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12158 * @param a0 The first extra argument.
12159 */
12160#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12161 do { \
12162 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
12163 } while (0)
12164
12165/**
12166 * Calls a FPU assembly implementation taking two visible arguments.
12167 *
12168 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12169 * @param a0 The first extra argument.
12170 * @param a1 The second extra argument.
12171 */
12172#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12173 do { \
12174 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12175 } while (0)
12176
12177/**
12178 * Calls a FPU assembly implementation taking three visible arguments.
12179 *
12180 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12181 * @param a0 The first extra argument.
12182 * @param a1 The second extra argument.
12183 * @param a2 The third extra argument.
12184 */
12185#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12186 do { \
12187 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12188 } while (0)
12189
12190#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12191 do { \
12192 (a_FpuData).FSW = (a_FSW); \
12193 (a_FpuData).r80Result = *(a_pr80Value); \
12194 } while (0)
12195
12196/** Pushes FPU result onto the stack. */
12197#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12198 iemFpuPushResult(pVCpu, &a_FpuData)
12199/** Pushes FPU result onto the stack and sets the FPUDP. */
12200#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12201 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12202
12203/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
12204#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12205 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12206
12207/** Stores FPU result in a stack register. */
12208#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12209 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12210/** Stores FPU result in a stack register and pops the stack. */
12211#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12212 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12213/** Stores FPU result in a stack register and sets the FPUDP. */
12214#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12215 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12216/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12217 * stack. */
12218#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12219 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
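/* Rough sketch of how these result macros combine for a typical ST(0),ST(i)
 * arithmetic instruction, together with the stack-register test and
 * conditional macros defined further below (worker and argument macro names
 * are assumed from elsewhere in this file and IEMAllAImpl):
 *
 *   IEM_MC_BEGIN(3, 1);
 *   IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *   IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
 *   IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,      1);
 *   IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,      2);
 *   IEM_MC_PREPARE_FPU_USAGE();
 *   IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, iStReg)
 *       IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *       IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *   IEM_MC_ELSE()
 *       IEM_MC_FPU_STACK_UNDERFLOW(0);
 *   IEM_MC_ENDIF();
 *   IEM_MC_ADVANCE_RIP();
 *   IEM_MC_END();
 */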
12220
12221/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12222#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12223 iemFpuUpdateOpcodeAndIp(pVCpu)
12224/** Free a stack register (for FFREE and FFREEP). */
12225#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12226 iemFpuStackFree(pVCpu, a_iStReg)
12227/** Increment the FPU stack pointer. */
12228#define IEM_MC_FPU_STACK_INC_TOP() \
12229 iemFpuStackIncTop(pVCpu)
12230/** Decrement the FPU stack pointer. */
12231#define IEM_MC_FPU_STACK_DEC_TOP() \
12232 iemFpuStackDecTop(pVCpu)
12233
12234/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12235#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12236 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12237/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12238#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12239 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12240/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12241#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12242 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12243/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12244#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12245 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12246/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12247 * stack. */
12248#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12249 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12250/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12251#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12252 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12253
12254/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12255#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12256 iemFpuStackUnderflow(pVCpu, a_iStDst)
12257/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12258 * stack. */
12259#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12260 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12261/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12262 * FPUDS. */
12263#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12264 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12265/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12266 * FPUDS. Pops stack. */
12267#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12268 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12269/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12270 * stack twice. */
12271#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12272 iemFpuStackUnderflowThenPopPop(pVCpu)
12273/** Raises a FPU stack underflow exception for an instruction pushing a result
12274 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12275#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12276 iemFpuStackPushUnderflow(pVCpu)
12277/** Raises a FPU stack underflow exception for an instruction pushing a result
12278 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12279#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12280 iemFpuStackPushUnderflowTwo(pVCpu)
12281
12282/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12283 * FPUIP, FPUCS and FOP. */
12284#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12285 iemFpuStackPushOverflow(pVCpu)
12286/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12287 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12288#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12289 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12290/** Prepares for using the FPU state.
12291 * Ensures that we can use the host FPU in the current context (RC+R0).
12292 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12293#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12294/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
12295#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12296/** Actualizes the guest FPU state so it can be accessed and modified. */
12297#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12298
12299/** Prepares for using the SSE state.
12300 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12301 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12302#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12303/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12304#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12305/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12306#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12307
12308/** Prepares for using the AVX state.
12309 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12310 * Ensures the guest AVX state in the CPUMCTX is up to date.
12311 * @note This will include the AVX512 state too when support for it is added
12312 * due to the zero-extending feature of VEX instructions. */
12313#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12314/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12315#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12316/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12317#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12318
12319/**
12320 * Calls a MMX assembly implementation taking two visible arguments.
12321 *
12322 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12323 * @param a0 The first extra argument.
12324 * @param a1 The second extra argument.
12325 */
12326#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12327 do { \
12328 IEM_MC_PREPARE_FPU_USAGE(); \
12329 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12330 } while (0)
12331
12332/**
12333 * Calls a MMX assembly implementation taking three visible arguments.
12334 *
12335 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12336 * @param a0 The first extra argument.
12337 * @param a1 The second extra argument.
12338 * @param a2 The third extra argument.
12339 */
12340#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12341 do { \
12342 IEM_MC_PREPARE_FPU_USAGE(); \
12343 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12344 } while (0)
12345
12346
12347/**
12348 * Calls a SSE assembly implementation taking two visible arguments.
12349 *
12350 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12351 * @param a0 The first extra argument.
12352 * @param a1 The second extra argument.
12353 */
12354#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12355 do { \
12356 IEM_MC_PREPARE_SSE_USAGE(); \
12357 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12358 } while (0)
12359
12360/**
12361 * Calls a SSE assembly implementation taking three visible arguments.
12362 *
12363 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12364 * @param a0 The first extra argument.
12365 * @param a1 The second extra argument.
12366 * @param a2 The third extra argument.
12367 */
12368#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12369 do { \
12370 IEM_MC_PREPARE_SSE_USAGE(); \
12371 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12372 } while (0)
12373
12374
12375/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12376 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12377#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12378 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState), 0)
12379
12380/**
12381 * Calls an AVX assembly implementation taking two visible arguments.
12382 *
12383 * There is one implicit zeroth argument, a pointer to the extended state.
12384 *
12385 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12386 * @param a1 The first extra argument.
12387 * @param a2 The second extra argument.
12388 */
12389#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12390 do { \
12391 IEM_MC_PREPARE_AVX_USAGE(); \
12392 a_pfnAImpl(pXState, (a1), (a2)); \
12393 } while (0)
12394
12395/**
12396 * Calls an AVX assembly implementation taking three visible arguments.
12397 *
12398 * There is one implicit zeroth argument, a pointer to the extended state.
12399 *
12400 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12401 * @param a1 The first extra argument.
12402 * @param a2 The second extra argument.
12403 * @param a3 The third extra argument.
12404 */
12405#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12406 do { \
12407 IEM_MC_PREPARE_AVX_USAGE(); \
12408 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12409 } while (0)
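/* Usage sketch for the implicit-state convention above (hypothetical VEX
 * instruction body; bDstReg, bSrcReg and a_pfnWorker are illustrative names):
 *
 *   IEM_MC_BEGIN(3, 0);
 *   IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();                    // declares pXState as argument 0
 *   IEM_MC_ARG_CONST(uint8_t, iYRegDst, bDstReg, 1);     // bDstReg/bSrcReg: decoded registers
 *   IEM_MC_ARG_CONST(uint8_t, iYRegSrc, bSrcReg, 2);
 *   IEM_MC_CALL_AVX_AIMPL_2(a_pfnWorker, iYRegDst, iYRegSrc);
 *   IEM_MC_ADVANCE_RIP();
 *   IEM_MC_END();
 */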
12410
12411/** @note Not for IOPL or IF testing. */
12412#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
12413/** @note Not for IOPL or IF testing. */
12414#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
12415/** @note Not for IOPL or IF testing. */
12416#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
12417/** @note Not for IOPL or IF testing. */
12418#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
12419/** @note Not for IOPL or IF testing. */
12420#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12421 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12422 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12423/** @note Not for IOPL or IF testing. */
12424#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12425 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12426 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12427/** @note Not for IOPL or IF testing. */
12428#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12429 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12430 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12431 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12432/** @note Not for IOPL or IF testing. */
12433#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12434 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12435 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12436 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12437#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
12438#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
12439#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
12440/** @note Not for IOPL or IF testing. */
12441#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12442 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12443 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12444/** @note Not for IOPL or IF testing. */
12445#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12446 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12447 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12448/** @note Not for IOPL or IF testing. */
12449#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12450 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12451 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12452/** @note Not for IOPL or IF testing. */
12453#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12454 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12455 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12456/** @note Not for IOPL or IF testing. */
12457#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12458 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12459 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12460/** @note Not for IOPL or IF testing. */
12461#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12462 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12463 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12464#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12465#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12466
12467#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12468 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12469#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12470 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12471#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12472 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12473#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12474 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12475#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12476 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12477#define IEM_MC_IF_FCW_IM() \
12478 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12479
12480#define IEM_MC_ELSE() } else {
12481#define IEM_MC_ENDIF() } do {} while (0)
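/* Conditional sketch (not taken from the opcode tables): a Jcc-style relative
 * jump built from the EFLAGS test macros above, assuming IEM_MC_REL_JMP_S8 and
 * IEM_MC_ADVANCE_RIP from elsewhere in this file, with i8Imm being the
 * displacement fetched by the caller:
 *
 *   IEM_MC_BEGIN(0, 0);
 *   IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *       IEM_MC_REL_JMP_S8(i8Imm);
 *   IEM_MC_ELSE()
 *       IEM_MC_ADVANCE_RIP();
 *   IEM_MC_ENDIF();
 *   IEM_MC_END();
 */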
12482
12483/** @} */
12484
12485
12486/** @name Opcode Debug Helpers.
12487 * @{
12488 */
12489#ifdef VBOX_WITH_STATISTICS
12490# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12491#else
12492# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12493#endif
12494
12495#ifdef DEBUG
12496# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12497 do { \
12498 IEMOP_INC_STATS(a_Stats); \
12499 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
12500 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12501 } while (0)
12502
12503# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12504 do { \
12505 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12506 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12507 (void)RT_CONCAT(OP_,a_Upper); \
12508 (void)(a_fDisHints); \
12509 (void)(a_fIemHints); \
12510 } while (0)
12511
12512# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12513 do { \
12514 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12515 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12516 (void)RT_CONCAT(OP_,a_Upper); \
12517 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12518 (void)(a_fDisHints); \
12519 (void)(a_fIemHints); \
12520 } while (0)
12521
12522# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12523 do { \
12524 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12525 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12526 (void)RT_CONCAT(OP_,a_Upper); \
12527 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12528 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12529 (void)(a_fDisHints); \
12530 (void)(a_fIemHints); \
12531 } while (0)
12532
12533# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12534 do { \
12535 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12536 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12537 (void)RT_CONCAT(OP_,a_Upper); \
12538 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12539 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12540 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12541 (void)(a_fDisHints); \
12542 (void)(a_fIemHints); \
12543 } while (0)
12544
12545# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12546 do { \
12547 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12548 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12549 (void)RT_CONCAT(OP_,a_Upper); \
12550 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12551 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12552 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12553 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12554 (void)(a_fDisHints); \
12555 (void)(a_fIemHints); \
12556 } while (0)
12557
12558#else
12559# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12560
12561# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12562 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12563# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12564 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12565# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12566 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12567# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12568 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12569# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12570 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12571
12572#endif
12573
12574#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12575 IEMOP_MNEMONIC0EX(a_Lower, \
12576 #a_Lower, \
12577 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12578#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12579 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12580 #a_Lower " " #a_Op1, \
12581 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12582#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12583 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12584 #a_Lower " " #a_Op1 "," #a_Op2, \
12585 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12586#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12587 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12588 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12589 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12590#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12591 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12592 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12593 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
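/* Invocation sketch (illustrative operands and hint flags; real opcode
 * functions pick their own): at the top of an opcode function one of the
 * wrappers above names the instruction and bumps its statistics counter, e.g.
 *
 *   IEMOP_MNEMONIC2(RM, ADD, add, Gv, Ev, DISOPTYPE_HARMLESS, 0);
 *
 * expands to IEMOP_MNEMONIC2EX with the statistics member add_Gv_Ev and the
 * mnemonic string "add Gv,Ev", and in debug builds additionally references
 * the IEMOPFORM_RM, OP_ADD and OP_PARM_Gv/OP_PARM_Ev tokens so that typos in
 * the tables are caught at compile time.
 */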
12594
12595/** @} */
12596
12597
12598/** @name Opcode Helpers.
12599 * @{
12600 */
12601
12602#ifdef IN_RING3
12603# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12604 do { \
12605 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12606 else \
12607 { \
12608 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12609 return IEMOP_RAISE_INVALID_OPCODE(); \
12610 } \
12611 } while (0)
12612#else
12613# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12614 do { \
12615 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12616 else return IEMOP_RAISE_INVALID_OPCODE(); \
12617 } while (0)
12618#endif
12619
12620/** The instruction requires a 186 or later. */
12621#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12622# define IEMOP_HLP_MIN_186() do { } while (0)
12623#else
12624# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12625#endif
12626
12627/** The instruction requires a 286 or later. */
12628#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12629# define IEMOP_HLP_MIN_286() do { } while (0)
12630#else
12631# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12632#endif
12633
12634/** The instruction requires a 386 or later. */
12635#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12636# define IEMOP_HLP_MIN_386() do { } while (0)
12637#else
12638# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12639#endif
12640
12641/** The instruction requires a 386 or later if the given expression is true. */
12642#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12643# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12644#else
12645# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12646#endif
12647
12648/** The instruction requires a 486 or later. */
12649#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12650# define IEMOP_HLP_MIN_486() do { } while (0)
12651#else
12652# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12653#endif
12654
12655/** The instruction requires a Pentium (586) or later. */
12656#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12657# define IEMOP_HLP_MIN_586() do { } while (0)
12658#else
12659# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12660#endif
12661
12662/** The instruction requires a PentiumPro (686) or later. */
12663#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12664# define IEMOP_HLP_MIN_686() do { } while (0)
12665#else
12666# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12667#endif
12668
12669
12670/** The instruction raises an \#UD in real and V8086 mode. */
12671#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12672 do \
12673 { \
12674 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12675 else return IEMOP_RAISE_INVALID_OPCODE(); \
12676 } while (0)
12677
12678/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12679 * 64-bit mode. */
12680#define IEMOP_HLP_NO_64BIT() \
12681 do \
12682 { \
12683 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12684 return IEMOP_RAISE_INVALID_OPCODE(); \
12685 } while (0)
12686
12687/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12688 * 64-bit mode. */
12689#define IEMOP_HLP_ONLY_64BIT() \
12690 do \
12691 { \
12692 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12693 return IEMOP_RAISE_INVALID_OPCODE(); \
12694 } while (0)
12695
12696/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12697#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12698 do \
12699 { \
12700 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12701 iemRecalEffOpSize64Default(pVCpu); \
12702 } while (0)
12703
12704/** The instruction has 64-bit operand size if 64-bit mode. */
12705#define IEMOP_HLP_64BIT_OP_SIZE() \
12706 do \
12707 { \
12708 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12709 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12710 } while (0)
12711
12712/** Only a REX prefix immediately preceding the first opcode byte takes
12713 * effect. This macro helps ensure that, and it logs bad guest code. */
12714#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12715 do \
12716 { \
12717 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12718 { \
12719 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
12720 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
12721 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12722 pVCpu->iem.s.uRexB = 0; \
12723 pVCpu->iem.s.uRexIndex = 0; \
12724 pVCpu->iem.s.uRexReg = 0; \
12725 iemRecalEffOpSize(pVCpu); \
12726 } \
12727 } while (0)
12728
12729/**
12730 * Done decoding.
12731 */
12732#define IEMOP_HLP_DONE_DECODING() \
12733 do \
12734 { \
12735 /*nothing for now, maybe later... */ \
12736 } while (0)
12737
12738/**
12739 * Done decoding, raise \#UD exception if lock prefix present.
12740 */
12741#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12742 do \
12743 { \
12744 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12745 { /* likely */ } \
12746 else \
12747 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12748 } while (0)
12749
12750
12751/**
12752 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12753 * repnz or size prefixes are present, or if in real or v8086 mode.
12754 */
12755#define IEMOP_HLP_DONE_VEX_DECODING() \
12756 do \
12757 { \
12758 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12759 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12760 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12761 { /* likely */ } \
12762 else \
12763 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12764 } while (0)
12765
12766/**
12767 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12768 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not zero.
12769 */
12770#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12771 do \
12772 { \
12773 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12774 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12775 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12776 && pVCpu->iem.s.uVexLength == 0)) \
12777 { /* likely */ } \
12778 else \
12779 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12780 } while (0)
12781
12782
12783/**
12784 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12785 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12786 * register 0, or if in real or v8086 mode.
12787 */
12788#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12789 do \
12790 { \
12791 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12792 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12793 && !pVCpu->iem.s.uVex3rdReg \
12794 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12795 { /* likely */ } \
12796 else \
12797 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12798 } while (0)
12799
12800/**
12801 * Done decoding VEX, no V, L=0.
12802 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12803 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12804 */
12805#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12806 do \
12807 { \
12808 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12809 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12810 && pVCpu->iem.s.uVexLength == 0 \
12811 && pVCpu->iem.s.uVex3rdReg == 0 \
12812 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12813 { /* likely */ } \
12814 else \
12815 return IEMOP_RAISE_INVALID_OPCODE(); \
12816 } while (0)
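/* Background sketch: VEX.vvvv is stored ones'-complemented in the prefix, so a
 * raw encoding of 1111b means "no register".  Decoding it looks roughly like
 * this (illustration only; bVexByte is a stand-in for the last VEX prefix byte
 * handled by the actual VEX decoders):
 *
 *   uint8_t const bVvvvRaw   = (bVexByte >> 3) & 0xf;  // bits 6:3 of the prefix byte
 *   uint8_t const uVex3rdReg = bVvvvRaw ^ 0xf;         // 1111b -> 0, i.e. "unused"
 *
 * which is why the checks above compare uVex3rdReg against zero.
 */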
12817
12818#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12819 do \
12820 { \
12821 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12822 { /* likely */ } \
12823 else \
12824 { \
12825 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12826 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12827 } \
12828 } while (0)
12829#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12830 do \
12831 { \
12832 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12833 { /* likely */ } \
12834 else \
12835 { \
12836 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12837 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12838 } \
12839 } while (0)
12840
12841/**
12842 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12843 * are present.
12844 */
12845#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12846 do \
12847 { \
12848 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12849 { /* likely */ } \
12850 else \
12851 return IEMOP_RAISE_INVALID_OPCODE(); \
12852 } while (0)
12853
12854
12855#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
12856/** Checks and handles an SVM nested-guest instruction intercept, updating
12857 * the NRIP if needed. */
12858# define IEMOP_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
12859 do \
12860 { \
12861 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
12862 { \
12863 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
12864 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
12865 } \
12866 } while (0)
12867
12868/** Checks and handles an SVM nested-guest CR read intercept. */
12869# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \
12870 do \
12871 { \
12872 if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)) \
12873 { \
12874 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
12875 IEM_RETURN_SVM_VMEXIT(a_pVCpu, SVM_EXIT_READ_CR0 + (a_uCr), a_uExitInfo1, a_uExitInfo2); \
12876 } \
12877 } while (0)
12878
12879#else /* !VBOX_WITH_NESTED_HWVIRT_SVM */
12880# define IEMOP_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12881# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12882#endif /* !VBOX_WITH_NESTED_HWVIRT_SVM */
12883
12884
12885/**
12886 * Calculates the effective address of a ModR/M memory operand.
12887 *
12888 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12889 *
12890 * @return Strict VBox status code.
12891 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12892 * @param bRm The ModRM byte.
12893 * @param cbImm The size of any immediate following the
12894 * effective address opcode bytes. Important for
12895 * RIP relative addressing.
12896 * @param pGCPtrEff Where to return the effective address.
12897 */
12898IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12899{
12900 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12901 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12902# define SET_SS_DEF() \
12903 do \
12904 { \
12905 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12906 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12907 } while (0)
12908
12909 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12910 {
12911/** @todo Check the effective address size crap! */
12912 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12913 {
12914 uint16_t u16EffAddr;
12915
12916 /* Handle the disp16 form with no registers first. */
12917 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12918 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12919 else
12920 {
12921 /* Get the displacement. */
12922 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12923 {
12924 case 0: u16EffAddr = 0; break;
12925 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12926 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12927 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12928 }
12929
12930 /* Add the base and index registers to the disp. */
12931 switch (bRm & X86_MODRM_RM_MASK)
12932 {
12933 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12934 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12935 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12936 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12937 case 4: u16EffAddr += pCtx->si; break;
12938 case 5: u16EffAddr += pCtx->di; break;
12939 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12940 case 7: u16EffAddr += pCtx->bx; break;
12941 }
12942 }
12943
12944 *pGCPtrEff = u16EffAddr;
12945 }
12946 else
12947 {
12948 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12949 uint32_t u32EffAddr;
12950
12951 /* Handle the disp32 form with no registers first. */
12952 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12953 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12954 else
12955 {
12956 /* Get the register (or SIB) value. */
12957 switch ((bRm & X86_MODRM_RM_MASK))
12958 {
12959 case 0: u32EffAddr = pCtx->eax; break;
12960 case 1: u32EffAddr = pCtx->ecx; break;
12961 case 2: u32EffAddr = pCtx->edx; break;
12962 case 3: u32EffAddr = pCtx->ebx; break;
12963 case 4: /* SIB */
12964 {
12965 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12966
12967 /* Get the index and scale it. */
12968 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12969 {
12970 case 0: u32EffAddr = pCtx->eax; break;
12971 case 1: u32EffAddr = pCtx->ecx; break;
12972 case 2: u32EffAddr = pCtx->edx; break;
12973 case 3: u32EffAddr = pCtx->ebx; break;
12974 case 4: u32EffAddr = 0; /*none */ break;
12975 case 5: u32EffAddr = pCtx->ebp; break;
12976 case 6: u32EffAddr = pCtx->esi; break;
12977 case 7: u32EffAddr = pCtx->edi; break;
12978 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12979 }
12980 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12981
12982 /* add base */
12983 switch (bSib & X86_SIB_BASE_MASK)
12984 {
12985 case 0: u32EffAddr += pCtx->eax; break;
12986 case 1: u32EffAddr += pCtx->ecx; break;
12987 case 2: u32EffAddr += pCtx->edx; break;
12988 case 3: u32EffAddr += pCtx->ebx; break;
12989 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12990 case 5:
12991 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12992 {
12993 u32EffAddr += pCtx->ebp;
12994 SET_SS_DEF();
12995 }
12996 else
12997 {
12998 uint32_t u32Disp;
12999 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13000 u32EffAddr += u32Disp;
13001 }
13002 break;
13003 case 6: u32EffAddr += pCtx->esi; break;
13004 case 7: u32EffAddr += pCtx->edi; break;
13005 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13006 }
13007 break;
13008 }
13009 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13010 case 6: u32EffAddr = pCtx->esi; break;
13011 case 7: u32EffAddr = pCtx->edi; break;
13012 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13013 }
13014
13015 /* Get and add the displacement. */
13016 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13017 {
13018 case 0:
13019 break;
13020 case 1:
13021 {
13022 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13023 u32EffAddr += i8Disp;
13024 break;
13025 }
13026 case 2:
13027 {
13028 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13029 u32EffAddr += u32Disp;
13030 break;
13031 }
13032 default:
13033 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13034 }
13035
13036 }
13037 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13038 *pGCPtrEff = u32EffAddr;
13039 else
13040 {
13041 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13042 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13043 }
13044 }
13045 }
13046 else
13047 {
13048 uint64_t u64EffAddr;
13049
13050 /* Handle the rip+disp32 form with no registers first. */
13051 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13052 {
13053 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13054 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13055 }
13056 else
13057 {
13058 /* Get the register (or SIB) value. */
13059 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13060 {
13061 case 0: u64EffAddr = pCtx->rax; break;
13062 case 1: u64EffAddr = pCtx->rcx; break;
13063 case 2: u64EffAddr = pCtx->rdx; break;
13064 case 3: u64EffAddr = pCtx->rbx; break;
13065 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13066 case 6: u64EffAddr = pCtx->rsi; break;
13067 case 7: u64EffAddr = pCtx->rdi; break;
13068 case 8: u64EffAddr = pCtx->r8; break;
13069 case 9: u64EffAddr = pCtx->r9; break;
13070 case 10: u64EffAddr = pCtx->r10; break;
13071 case 11: u64EffAddr = pCtx->r11; break;
13072 case 13: u64EffAddr = pCtx->r13; break;
13073 case 14: u64EffAddr = pCtx->r14; break;
13074 case 15: u64EffAddr = pCtx->r15; break;
13075 /* SIB */
13076 case 4:
13077 case 12:
13078 {
13079 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13080
13081 /* Get the index and scale it. */
13082 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13083 {
13084 case 0: u64EffAddr = pCtx->rax; break;
13085 case 1: u64EffAddr = pCtx->rcx; break;
13086 case 2: u64EffAddr = pCtx->rdx; break;
13087 case 3: u64EffAddr = pCtx->rbx; break;
13088 case 4: u64EffAddr = 0; /*none */ break;
13089 case 5: u64EffAddr = pCtx->rbp; break;
13090 case 6: u64EffAddr = pCtx->rsi; break;
13091 case 7: u64EffAddr = pCtx->rdi; break;
13092 case 8: u64EffAddr = pCtx->r8; break;
13093 case 9: u64EffAddr = pCtx->r9; break;
13094 case 10: u64EffAddr = pCtx->r10; break;
13095 case 11: u64EffAddr = pCtx->r11; break;
13096 case 12: u64EffAddr = pCtx->r12; break;
13097 case 13: u64EffAddr = pCtx->r13; break;
13098 case 14: u64EffAddr = pCtx->r14; break;
13099 case 15: u64EffAddr = pCtx->r15; break;
13100 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13101 }
13102 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13103
13104 /* add base */
13105 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13106 {
13107 case 0: u64EffAddr += pCtx->rax; break;
13108 case 1: u64EffAddr += pCtx->rcx; break;
13109 case 2: u64EffAddr += pCtx->rdx; break;
13110 case 3: u64EffAddr += pCtx->rbx; break;
13111 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13112 case 6: u64EffAddr += pCtx->rsi; break;
13113 case 7: u64EffAddr += pCtx->rdi; break;
13114 case 8: u64EffAddr += pCtx->r8; break;
13115 case 9: u64EffAddr += pCtx->r9; break;
13116 case 10: u64EffAddr += pCtx->r10; break;
13117 case 11: u64EffAddr += pCtx->r11; break;
13118 case 12: u64EffAddr += pCtx->r12; break;
13119 case 14: u64EffAddr += pCtx->r14; break;
13120 case 15: u64EffAddr += pCtx->r15; break;
13121 /* complicated encodings */
13122 case 5:
13123 case 13:
13124 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13125 {
13126 if (!pVCpu->iem.s.uRexB)
13127 {
13128 u64EffAddr += pCtx->rbp;
13129 SET_SS_DEF();
13130 }
13131 else
13132 u64EffAddr += pCtx->r13;
13133 }
13134 else
13135 {
13136 uint32_t u32Disp;
13137 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13138 u64EffAddr += (int32_t)u32Disp;
13139 }
13140 break;
13141 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13142 }
13143 break;
13144 }
13145 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13146 }
13147
13148 /* Get and add the displacement. */
13149 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13150 {
13151 case 0:
13152 break;
13153 case 1:
13154 {
13155 int8_t i8Disp;
13156 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13157 u64EffAddr += i8Disp;
13158 break;
13159 }
13160 case 2:
13161 {
13162 uint32_t u32Disp;
13163 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13164 u64EffAddr += (int32_t)u32Disp;
13165 break;
13166 }
13167 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13168 }
13169
13170 }
13171
13172 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13173 *pGCPtrEff = u64EffAddr;
13174 else
13175 {
13176 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13177 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13178 }
13179 }
13180
13181 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13182 return VINF_SUCCESS;
13183}
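/* Worked example (illustration only): with a 16-bit effective address size,
 * bRm = 0x42 decodes as mod=01, reg=000, rm=010, i.e. [bp+si+disp8].  For
 * bp=0x1000, si=0x0020 and an 8-bit displacement of 0x05 the function above
 * computes
 *
 *   u16EffAddr = 0x05 + 0x1000 + 0x0020 = 0x1025
 *
 * and, since rm=010 is a BP-based form, SET_SS_DEF() makes SS the default
 * segment unless a segment override prefix is active.
 */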
13184
13185
13186/**
13187 * Calculates the effective address of a ModR/M memory operand.
13188 *
13189 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13190 *
13191 * @return Strict VBox status code.
13192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13193 * @param bRm The ModRM byte.
13194 * @param cbImm The size of any immediate following the
13195 * effective address opcode bytes. Important for
13196 * RIP relative addressing.
13197 * @param pGCPtrEff Where to return the effective address.
13198 * @param offRsp RSP displacement.
13199 */
13200IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13201{
13202 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13203 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13204# define SET_SS_DEF() \
13205 do \
13206 { \
13207 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13208 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13209 } while (0)
13210
13211 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13212 {
13213/** @todo Check the effective address size crap! */
13214 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13215 {
13216 uint16_t u16EffAddr;
13217
13218 /* Handle the disp16 form with no registers first. */
13219 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13220 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13221 else
13222 {
13223 /* Get the displacement. */
13224 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13225 {
13226 case 0: u16EffAddr = 0; break;
13227 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13228 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13229 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13230 }
13231
13232 /* Add the base and index registers to the disp. */
13233 switch (bRm & X86_MODRM_RM_MASK)
13234 {
13235 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13236 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13237 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13238 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13239 case 4: u16EffAddr += pCtx->si; break;
13240 case 5: u16EffAddr += pCtx->di; break;
13241 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13242 case 7: u16EffAddr += pCtx->bx; break;
13243 }
13244 }
13245
13246 *pGCPtrEff = u16EffAddr;
13247 }
13248 else
13249 {
13250 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13251 uint32_t u32EffAddr;
13252
13253 /* Handle the disp32 form with no registers first. */
13254 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13255 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13256 else
13257 {
13258 /* Get the register (or SIB) value. */
13259 switch ((bRm & X86_MODRM_RM_MASK))
13260 {
13261 case 0: u32EffAddr = pCtx->eax; break;
13262 case 1: u32EffAddr = pCtx->ecx; break;
13263 case 2: u32EffAddr = pCtx->edx; break;
13264 case 3: u32EffAddr = pCtx->ebx; break;
13265 case 4: /* SIB */
13266 {
13267 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13268
13269 /* Get the index and scale it. */
13270 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13271 {
13272 case 0: u32EffAddr = pCtx->eax; break;
13273 case 1: u32EffAddr = pCtx->ecx; break;
13274 case 2: u32EffAddr = pCtx->edx; break;
13275 case 3: u32EffAddr = pCtx->ebx; break;
13276 case 4: u32EffAddr = 0; /*none */ break;
13277 case 5: u32EffAddr = pCtx->ebp; break;
13278 case 6: u32EffAddr = pCtx->esi; break;
13279 case 7: u32EffAddr = pCtx->edi; break;
13280 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13281 }
13282 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13283
13284 /* add base */
13285 switch (bSib & X86_SIB_BASE_MASK)
13286 {
13287 case 0: u32EffAddr += pCtx->eax; break;
13288 case 1: u32EffAddr += pCtx->ecx; break;
13289 case 2: u32EffAddr += pCtx->edx; break;
13290 case 3: u32EffAddr += pCtx->ebx; break;
13291 case 4:
13292 u32EffAddr += pCtx->esp + offRsp;
13293 SET_SS_DEF();
13294 break;
13295 case 5:
13296 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13297 {
13298 u32EffAddr += pCtx->ebp;
13299 SET_SS_DEF();
13300 }
13301 else
13302 {
13303 uint32_t u32Disp;
13304 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13305 u32EffAddr += u32Disp;
13306 }
13307 break;
13308 case 6: u32EffAddr += pCtx->esi; break;
13309 case 7: u32EffAddr += pCtx->edi; break;
13310 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13311 }
13312 break;
13313 }
13314 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13315 case 6: u32EffAddr = pCtx->esi; break;
13316 case 7: u32EffAddr = pCtx->edi; break;
13317 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13318 }
13319
13320 /* Get and add the displacement. */
13321 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13322 {
13323 case 0:
13324 break;
13325 case 1:
13326 {
13327 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13328 u32EffAddr += i8Disp;
13329 break;
13330 }
13331 case 2:
13332 {
13333 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13334 u32EffAddr += u32Disp;
13335 break;
13336 }
13337 default:
13338 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13339 }
13340
13341 }
13342 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13343 *pGCPtrEff = u32EffAddr;
13344 else
13345 {
13346 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13347 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13348 }
13349 }
13350 }
13351 else
13352 {
13353 uint64_t u64EffAddr;
13354
13355 /* Handle the rip+disp32 form with no registers first. */
13356 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13357 {
13358 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13359 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13360 }
13361 else
13362 {
13363 /* Get the register (or SIB) value. */
13364 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13365 {
13366 case 0: u64EffAddr = pCtx->rax; break;
13367 case 1: u64EffAddr = pCtx->rcx; break;
13368 case 2: u64EffAddr = pCtx->rdx; break;
13369 case 3: u64EffAddr = pCtx->rbx; break;
13370 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13371 case 6: u64EffAddr = pCtx->rsi; break;
13372 case 7: u64EffAddr = pCtx->rdi; break;
13373 case 8: u64EffAddr = pCtx->r8; break;
13374 case 9: u64EffAddr = pCtx->r9; break;
13375 case 10: u64EffAddr = pCtx->r10; break;
13376 case 11: u64EffAddr = pCtx->r11; break;
13377 case 13: u64EffAddr = pCtx->r13; break;
13378 case 14: u64EffAddr = pCtx->r14; break;
13379 case 15: u64EffAddr = pCtx->r15; break;
13380 /* SIB */
13381 case 4:
13382 case 12:
13383 {
13384 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13385
13386 /* Get the index and scale it. */
13387 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13388 {
13389 case 0: u64EffAddr = pCtx->rax; break;
13390 case 1: u64EffAddr = pCtx->rcx; break;
13391 case 2: u64EffAddr = pCtx->rdx; break;
13392 case 3: u64EffAddr = pCtx->rbx; break;
13393 case 4: u64EffAddr = 0; /*none */ break;
13394 case 5: u64EffAddr = pCtx->rbp; break;
13395 case 6: u64EffAddr = pCtx->rsi; break;
13396 case 7: u64EffAddr = pCtx->rdi; break;
13397 case 8: u64EffAddr = pCtx->r8; break;
13398 case 9: u64EffAddr = pCtx->r9; break;
13399 case 10: u64EffAddr = pCtx->r10; break;
13400 case 11: u64EffAddr = pCtx->r11; break;
13401 case 12: u64EffAddr = pCtx->r12; break;
13402 case 13: u64EffAddr = pCtx->r13; break;
13403 case 14: u64EffAddr = pCtx->r14; break;
13404 case 15: u64EffAddr = pCtx->r15; break;
13405 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13406 }
13407 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13408
13409 /* add base */
13410 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13411 {
13412 case 0: u64EffAddr += pCtx->rax; break;
13413 case 1: u64EffAddr += pCtx->rcx; break;
13414 case 2: u64EffAddr += pCtx->rdx; break;
13415 case 3: u64EffAddr += pCtx->rbx; break;
13416 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
13417 case 6: u64EffAddr += pCtx->rsi; break;
13418 case 7: u64EffAddr += pCtx->rdi; break;
13419 case 8: u64EffAddr += pCtx->r8; break;
13420 case 9: u64EffAddr += pCtx->r9; break;
13421 case 10: u64EffAddr += pCtx->r10; break;
13422 case 11: u64EffAddr += pCtx->r11; break;
13423 case 12: u64EffAddr += pCtx->r12; break;
13424 case 14: u64EffAddr += pCtx->r14; break;
13425 case 15: u64EffAddr += pCtx->r15; break;
13426 /* complicated encodings */
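                        /* Note (added): base encodings 5 and 13 (REX.B) normally select RBP/R13,
                           but with mod=00 they mean a plain disp32 with no base register instead. */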
13427 case 5:
13428 case 13:
13429 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13430 {
13431 if (!pVCpu->iem.s.uRexB)
13432 {
13433 u64EffAddr += pCtx->rbp;
13434 SET_SS_DEF();
13435 }
13436 else
13437 u64EffAddr += pCtx->r13;
13438 }
13439 else
13440 {
13441 uint32_t u32Disp;
13442 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13443 u64EffAddr += (int32_t)u32Disp;
13444 }
13445 break;
13446 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13447 }
13448 break;
13449 }
13450 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13451 }
13452
13453 /* Get and add the displacement. */
13454 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13455 {
13456 case 0:
13457 break;
13458 case 1:
13459 {
13460 int8_t i8Disp;
13461 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13462 u64EffAddr += i8Disp;
13463 break;
13464 }
13465 case 2:
13466 {
13467 uint32_t u32Disp;
13468 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13469 u64EffAddr += (int32_t)u32Disp;
13470 break;
13471 }
13472 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13473 }
13474
13475 }
13476
13477 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13478 *pGCPtrEff = u64EffAddr;
13479 else
13480 {
13481 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13482 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13483 }
13484 }
13485
13486 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13487 return VINF_SUCCESS;
13488}
13489
13490
13491#ifdef IEM_WITH_SETJMP
13492/**
13493 * Calculates the effective address of a ModR/M memory operand.
13494 *
13495 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13496 *
13497 * May longjmp on internal error.
13498 *
13499 * @return The effective address.
13500 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13501 * @param bRm The ModRM byte.
13502 * @param cbImm The size of any immediate following the
13503 * effective address opcode bytes. Important for
13504 * RIP relative addressing.
13505 */
13506IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13507{
13508 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
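    /* Note (added): ModR/M layout reminder - mod = bits 7:6, reg = bits 5:3, r/m = bits 2:0.
       A SIB byte (scale 7:6, index 5:3, base 2:0) follows whenever r/m = 100b in a memory form. */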
13509 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13510# define SET_SS_DEF() \
13511 do \
13512 { \
13513 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13514 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13515 } while (0)
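    /* Note (added): xBP/xSP based addressing defaults to the SS segment unless a segment
       prefix is present; SET_SS_DEF() records that default for the caller. */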
13516
13517 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13518 {
13519/** @todo Check the effective address size crap! */
13520 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13521 {
13522 uint16_t u16EffAddr;
13523
13524 /* Handle the disp16 form with no registers first. */
13525 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13526 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13527 else
13528 {
13529 /* Get the displacement. */
13530 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13531 {
13532 case 0: u16EffAddr = 0; break;
13533 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13534 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13535 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13536 }
13537
13538 /* Add the base and index registers to the disp. */
13539 switch (bRm & X86_MODRM_RM_MASK)
13540 {
13541 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13542 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13543 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13544 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13545 case 4: u16EffAddr += pCtx->si; break;
13546 case 5: u16EffAddr += pCtx->di; break;
13547 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13548 case 7: u16EffAddr += pCtx->bx; break;
13549 }
13550 }
13551
13552 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13553 return u16EffAddr;
13554 }
13555
13556 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13557 uint32_t u32EffAddr;
13558
13559 /* Handle the disp32 form with no registers first. */
13560 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13561 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13562 else
13563 {
13564 /* Get the register (or SIB) value. */
13565 switch ((bRm & X86_MODRM_RM_MASK))
13566 {
13567 case 0: u32EffAddr = pCtx->eax; break;
13568 case 1: u32EffAddr = pCtx->ecx; break;
13569 case 2: u32EffAddr = pCtx->edx; break;
13570 case 3: u32EffAddr = pCtx->ebx; break;
13571 case 4: /* SIB */
13572 {
13573 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13574
13575 /* Get the index and scale it. */
13576 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13577 {
13578 case 0: u32EffAddr = pCtx->eax; break;
13579 case 1: u32EffAddr = pCtx->ecx; break;
13580 case 2: u32EffAddr = pCtx->edx; break;
13581 case 3: u32EffAddr = pCtx->ebx; break;
13582 case 4: u32EffAddr = 0; /*none */ break;
13583 case 5: u32EffAddr = pCtx->ebp; break;
13584 case 6: u32EffAddr = pCtx->esi; break;
13585 case 7: u32EffAddr = pCtx->edi; break;
13586 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13587 }
13588 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13589
13590 /* add base */
13591 switch (bSib & X86_SIB_BASE_MASK)
13592 {
13593 case 0: u32EffAddr += pCtx->eax; break;
13594 case 1: u32EffAddr += pCtx->ecx; break;
13595 case 2: u32EffAddr += pCtx->edx; break;
13596 case 3: u32EffAddr += pCtx->ebx; break;
13597 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
13598 case 5:
13599 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13600 {
13601 u32EffAddr += pCtx->ebp;
13602 SET_SS_DEF();
13603 }
13604 else
13605 {
13606 uint32_t u32Disp;
13607 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13608 u32EffAddr += u32Disp;
13609 }
13610 break;
13611 case 6: u32EffAddr += pCtx->esi; break;
13612 case 7: u32EffAddr += pCtx->edi; break;
13613 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13614 }
13615 break;
13616 }
13617 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13618 case 6: u32EffAddr = pCtx->esi; break;
13619 case 7: u32EffAddr = pCtx->edi; break;
13620 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13621 }
13622
13623 /* Get and add the displacement. */
13624 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13625 {
13626 case 0:
13627 break;
13628 case 1:
13629 {
13630 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13631 u32EffAddr += i8Disp;
13632 break;
13633 }
13634 case 2:
13635 {
13636 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13637 u32EffAddr += u32Disp;
13638 break;
13639 }
13640 default:
13641 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13642 }
13643 }
13644
13645 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13646 {
13647 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13648 return u32EffAddr;
13649 }
13650 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13651 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13652 return u32EffAddr & UINT16_MAX;
13653 }
13654
13655 uint64_t u64EffAddr;
13656
13657 /* Handle the rip+disp32 form with no registers first. */
13658 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13659 {
13660 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13661 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13662 }
13663 else
13664 {
13665 /* Get the register (or SIB) value. */
13666 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13667 {
13668 case 0: u64EffAddr = pCtx->rax; break;
13669 case 1: u64EffAddr = pCtx->rcx; break;
13670 case 2: u64EffAddr = pCtx->rdx; break;
13671 case 3: u64EffAddr = pCtx->rbx; break;
13672 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13673 case 6: u64EffAddr = pCtx->rsi; break;
13674 case 7: u64EffAddr = pCtx->rdi; break;
13675 case 8: u64EffAddr = pCtx->r8; break;
13676 case 9: u64EffAddr = pCtx->r9; break;
13677 case 10: u64EffAddr = pCtx->r10; break;
13678 case 11: u64EffAddr = pCtx->r11; break;
13679 case 13: u64EffAddr = pCtx->r13; break;
13680 case 14: u64EffAddr = pCtx->r14; break;
13681 case 15: u64EffAddr = pCtx->r15; break;
13682 /* SIB */
13683 case 4:
13684 case 12:
13685 {
13686 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13687
13688 /* Get the index and scale it. */
13689 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13690 {
13691 case 0: u64EffAddr = pCtx->rax; break;
13692 case 1: u64EffAddr = pCtx->rcx; break;
13693 case 2: u64EffAddr = pCtx->rdx; break;
13694 case 3: u64EffAddr = pCtx->rbx; break;
13695 case 4: u64EffAddr = 0; /*none */ break;
13696 case 5: u64EffAddr = pCtx->rbp; break;
13697 case 6: u64EffAddr = pCtx->rsi; break;
13698 case 7: u64EffAddr = pCtx->rdi; break;
13699 case 8: u64EffAddr = pCtx->r8; break;
13700 case 9: u64EffAddr = pCtx->r9; break;
13701 case 10: u64EffAddr = pCtx->r10; break;
13702 case 11: u64EffAddr = pCtx->r11; break;
13703 case 12: u64EffAddr = pCtx->r12; break;
13704 case 13: u64EffAddr = pCtx->r13; break;
13705 case 14: u64EffAddr = pCtx->r14; break;
13706 case 15: u64EffAddr = pCtx->r15; break;
13707 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13708 }
13709 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13710
13711 /* add base */
13712 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13713 {
13714 case 0: u64EffAddr += pCtx->rax; break;
13715 case 1: u64EffAddr += pCtx->rcx; break;
13716 case 2: u64EffAddr += pCtx->rdx; break;
13717 case 3: u64EffAddr += pCtx->rbx; break;
13718 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13719 case 6: u64EffAddr += pCtx->rsi; break;
13720 case 7: u64EffAddr += pCtx->rdi; break;
13721 case 8: u64EffAddr += pCtx->r8; break;
13722 case 9: u64EffAddr += pCtx->r9; break;
13723 case 10: u64EffAddr += pCtx->r10; break;
13724 case 11: u64EffAddr += pCtx->r11; break;
13725 case 12: u64EffAddr += pCtx->r12; break;
13726 case 14: u64EffAddr += pCtx->r14; break;
13727 case 15: u64EffAddr += pCtx->r15; break;
13728 /* complicated encodings */
13729 case 5:
13730 case 13:
13731 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13732 {
13733 if (!pVCpu->iem.s.uRexB)
13734 {
13735 u64EffAddr += pCtx->rbp;
13736 SET_SS_DEF();
13737 }
13738 else
13739 u64EffAddr += pCtx->r13;
13740 }
13741 else
13742 {
13743 uint32_t u32Disp;
13744 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13745 u64EffAddr += (int32_t)u32Disp;
13746 }
13747 break;
13748 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13749 }
13750 break;
13751 }
13752 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13753 }
13754
13755 /* Get and add the displacement. */
13756 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13757 {
13758 case 0:
13759 break;
13760 case 1:
13761 {
13762 int8_t i8Disp;
13763 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13764 u64EffAddr += i8Disp;
13765 break;
13766 }
13767 case 2:
13768 {
13769 uint32_t u32Disp;
13770 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13771 u64EffAddr += (int32_t)u32Disp;
13772 break;
13773 }
13774 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13775 }
13776
13777 }
13778
13779 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13780 {
13781 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13782 return u64EffAddr;
13783 }
13784 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13785 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13786 return u64EffAddr & UINT32_MAX;
13787}
13788#endif /* IEM_WITH_SETJMP */
13789
13790
13791/** @} */
13792
13793
13794
13795/*
13796 * Include the instructions
13797 */
13798#include "IEMAllInstructions.cpp.h"
13799
13800
13801
13802
13803#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13804
13805/**
13806 * Sets up execution verification mode.
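 *
 * When verification is enabled, the guest context is swapped for a private copy so the
 * same instruction can afterwards be replayed by REM/HM and the two resulting states
 * compared in iemExecVerificationModeCheck().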
13807 */
13808IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
13809{
13811 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
13812
13813 /*
13814 * Always note down the address of the current instruction.
13815 */
13816 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
13817 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
13818
13819 /*
13820 * Enable verification and/or logging.
13821 */
13822 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
13823 if ( fNewNoRem
13824 && ( 0
13825#if 0 /* auto enable on first paged protected mode interrupt */
13826 || ( pOrgCtx->eflags.Bits.u1IF
13827 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
13828 && TRPMHasTrap(pVCpu)
13829 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
13830#endif
13831#if 0
13832 || ( pOrgCtx->cs.Sel == 0x10
13833 && ( pOrgCtx->rip == 0x90119e3e
13834 || pOrgCtx->rip == 0x901d9810))
13835#endif
13836#if 0 /* Auto enable DSL - FPU stuff. */
13837 || ( pOrgCtx->cs.Sel == 0x10
13838 && (// pOrgCtx->rip == 0xc02ec07f
13839 //|| pOrgCtx->rip == 0xc02ec082
13840 //|| pOrgCtx->rip == 0xc02ec0c9
13841 0
13842 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
13843#endif
13844#if 0 /* Auto enable DSL - fstp st0 stuff. */
13845 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
13846#endif
13847#if 0
13848 || pOrgCtx->rip == 0x9022bb3a
13849#endif
13850#if 0
13851 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
13852#endif
13853#if 0
13854 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
13855 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
13856#endif
13857#if 0 /* NT4SP1 - later on the blue screen, things go wrong... */
13858 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
13859 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
13860 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
13861#endif
13862#if 0 /* NT4SP1 - xadd early boot. */
13863 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
13864#endif
13865#if 0 /* NT4SP1 - wrmsr (intel MSR). */
13866 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
13867#endif
13868#if 0 /* NT4SP1 - cmpxchg (AMD). */
13869 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
13870#endif
13871#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
13872 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
13873#endif
13874#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
13875 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
13876
13877#endif
13878#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
13879 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
13880
13881#endif
13882#if 0 /* NT4SP1 - frstor [ecx] */
13883 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
13884#endif
13885#if 0 /* xxxxxx - All long mode code. */
13886 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
13887#endif
13888#if 0 /* rep movsq linux 3.7 64-bit boot. */
13889 || (pOrgCtx->rip == 0x0000000000100241)
13890#endif
13891#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
13892 || (pOrgCtx->rip == 0x000000000215e240)
13893#endif
13894#if 0 /* DOS's size-overridden iret to v8086. */
13895 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
13896#endif
13897 )
13898 )
13899 {
13900 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
13901 RTLogFlags(NULL, "enabled");
13902 fNewNoRem = false;
13903 }
13904 if (fNewNoRem != pVCpu->iem.s.fNoRem)
13905 {
13906 pVCpu->iem.s.fNoRem = fNewNoRem;
13907 if (!fNewNoRem)
13908 {
13909 LogAlways(("Enabling verification mode!\n"));
13910 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
13911 }
13912 else
13913 LogAlways(("Disabling verification mode!\n"));
13914 }
13915
13916 /*
13917 * Switch state.
13918 */
13919 if (IEM_VERIFICATION_ENABLED(pVCpu))
13920 {
13921 static CPUMCTX s_DebugCtx; /* Ugly! */
13922
13923 s_DebugCtx = *pOrgCtx;
13924 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
13925 }
13926
13927 /*
13928 * See if there is an interrupt pending in TRPM and inject it if we can.
13929 */
13930 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13931 /** @todo Maybe someday we can centralize this under CPUMCanInjectInterrupt()? */
13932#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13933 bool fIntrEnabled = pOrgCtx->hwvirt.Gif;
13934 if (fIntrEnabled)
13935 {
13936 if (CPUMIsGuestInSvmNestedHwVirtMode(pOrgCtx))
13937 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pOrgCtx);
13938 else
13939 fIntrEnabled = pOrgCtx->eflags.Bits.u1IF;
13940 }
13941#else
13942 bool fIntrEnabled = pOrgCtx->eflags.Bits.u1IF;
13943#endif
13944 if ( fIntrEnabled
13945 && TRPMHasTrap(pVCpu)
13946 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
13947 {
13948 uint8_t u8TrapNo;
13949 TRPMEVENT enmType;
13950 RTGCUINT uErrCode;
13951 RTGCPTR uCr2;
13952 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13953 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13954 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13955 TRPMResetTrap(pVCpu);
13956 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
13957 }
13958
13959 /*
13960 * Reset the counters.
13961 */
13962 pVCpu->iem.s.cIOReads = 0;
13963 pVCpu->iem.s.cIOWrites = 0;
13964 pVCpu->iem.s.fIgnoreRaxRdx = false;
13965 pVCpu->iem.s.fOverlappingMovs = false;
13966 pVCpu->iem.s.fProblematicMemory = false;
13967 pVCpu->iem.s.fUndefinedEFlags = 0;
13968
13969 if (IEM_VERIFICATION_ENABLED(pVCpu))
13970 {
13971 /*
13972 * Free all verification records.
13973 */
13974 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
13975 pVCpu->iem.s.pIemEvtRecHead = NULL;
13976 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
13977 do
13978 {
13979 while (pEvtRec)
13980 {
13981 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
13982 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
13983 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
13984 pEvtRec = pNext;
13985 }
13986 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
13987 pVCpu->iem.s.pOtherEvtRecHead = NULL;
13988 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
13989 } while (pEvtRec);
13990 }
13991}
13992
13993
13994/**
13995 * Allocate an event record.
13996 * @returns Pointer to a record.
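 *          NULL when verification is disabled, when the record lists aren't set up
 *          yet, or when allocation fails.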
13997 */
13998IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
13999{
14000 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14001 return NULL;
14002
14003 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
14004 if (pEvtRec)
14005 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
14006 else
14007 {
14008 if (!pVCpu->iem.s.ppIemEvtRecNext)
14009 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
14010
14011 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
14012 if (!pEvtRec)
14013 return NULL;
14014 }
14015 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
14016 pEvtRec->pNext = NULL;
14017 return pEvtRec;
14018}
14019
14020
14021/**
14022 * IOMMMIORead notification.
14023 */
14024VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
14025{
14026 PVMCPU pVCpu = VMMGetCpu(pVM);
14027 if (!pVCpu)
14028 return;
14029 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14030 if (!pEvtRec)
14031 return;
14032 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
14033 pEvtRec->u.RamRead.GCPhys = GCPhys;
14034 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
14035 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14036 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14037}
14038
14039
14040/**
14041 * IOMMMIOWrite notification.
14042 */
14043VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
14044{
14045 PVMCPU pVCpu = VMMGetCpu(pVM);
14046 if (!pVCpu)
14047 return;
14048 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14049 if (!pEvtRec)
14050 return;
14051 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
14052 pEvtRec->u.RamWrite.GCPhys = GCPhys;
14053 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
14054 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
14055 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
14056 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
14057 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
14058 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14059 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14060}
14061
14062
14063/**
14064 * IOMIOPortRead notification.
14065 */
14066VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
14067{
14068 PVMCPU pVCpu = VMMGetCpu(pVM);
14069 if (!pVCpu)
14070 return;
14071 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14072 if (!pEvtRec)
14073 return;
14074 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
14075 pEvtRec->u.IOPortRead.Port = Port;
14076 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
14077 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14078 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14079}
14080
14081/**
14082 * IOMIOPortWrite notification.
14083 */
14084VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14085{
14086 PVMCPU pVCpu = VMMGetCpu(pVM);
14087 if (!pVCpu)
14088 return;
14089 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14090 if (!pEvtRec)
14091 return;
14092 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
14093 pEvtRec->u.IOPortWrite.Port = Port;
14094 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
14095 pEvtRec->u.IOPortWrite.u32Value = u32Value;
14096 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14097 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14098}
14099
14100
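/**
 * I/O port string read notification.
 */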
14101VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
14102{
14103 PVMCPU pVCpu = VMMGetCpu(pVM);
14104 if (!pVCpu)
14105 return;
14106 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14107 if (!pEvtRec)
14108 return;
14109 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
14110 pEvtRec->u.IOPortStrRead.Port = Port;
14111 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
14112 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
14113 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14114 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14115}
14116
14117
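/**
 * I/O port string write notification.
 */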
14118VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
14119{
14120 PVMCPU pVCpu = VMMGetCpu(pVM);
14121 if (!pVCpu)
14122 return;
14123 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14124 if (!pEvtRec)
14125 return;
14126 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
14127 pEvtRec->u.IOPortStrWrite.Port = Port;
14128 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
14129 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
14130 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14131 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14132}
14133
14134
14135/**
14136 * Fakes and records an I/O port read.
14137 *
14138 * @returns VINF_SUCCESS.
14139 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14140 * @param Port The I/O port.
14141 * @param pu32Value Where to store the fake value.
14142 * @param cbValue The size of the access.
14143 */
14144IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14145{
14146 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14147 if (pEvtRec)
14148 {
14149 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
14150 pEvtRec->u.IOPortRead.Port = Port;
14151 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
14152 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
14153 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
14154 }
14155 pVCpu->iem.s.cIOReads++;
14156 *pu32Value = 0xcccccccc;
14157 return VINF_SUCCESS;
14158}
14159
14160
14161/**
14162 * Fakes and records an I/O port write.
14163 *
14164 * @returns VINF_SUCCESS.
14165 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14166 * @param Port The I/O port.
14167 * @param u32Value The value being written.
14168 * @param cbValue The size of the access.
14169 */
14170IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14171{
14172 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14173 if (pEvtRec)
14174 {
14175 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
14176 pEvtRec->u.IOPortWrite.Port = Port;
14177 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
14178 pEvtRec->u.IOPortWrite.u32Value = u32Value;
14179 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
14180 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
14181 }
14182 pVCpu->iem.s.cIOWrites++;
14183 return VINF_SUCCESS;
14184}
14185
14186
14187/**
14188 * Used to add extra details about a stub case.
14189 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14190 */
14191IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
14192{
14193 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14194 PVM pVM = pVCpu->CTX_SUFF(pVM);
14196 char szRegs[4096];
14197 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
14198 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
14199 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
14200 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
14201 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
14202 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
14203 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
14204 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
14205 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
14206 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
14207 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
14208 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
14209 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
14210 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
14211 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
14212 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
14213 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
14214 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
14215 " efer=%016VR{efer}\n"
14216 " pat=%016VR{pat}\n"
14217 " sf_mask=%016VR{sf_mask}\n"
14218 "krnl_gs_base=%016VR{krnl_gs_base}\n"
14219 " lstar=%016VR{lstar}\n"
14220 " star=%016VR{star} cstar=%016VR{cstar}\n"
14221 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
14222 );
14223
14224 char szInstr1[256];
14225 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
14226 DBGF_DISAS_FLAGS_DEFAULT_MODE,
14227 szInstr1, sizeof(szInstr1), NULL);
14228 char szInstr2[256];
14229 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
14230 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14231 szInstr2, sizeof(szInstr2), NULL);
14232
14233 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
14234}
14235
14236
14237/**
14238 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
14239 * dump to the assertion info.
14240 *
14241 * @param pEvtRec The record to dump.
14242 */
14243IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
14244{
14245 switch (pEvtRec->enmEvent)
14246 {
14247 case IEMVERIFYEVENT_IOPORT_READ:
14248 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
14249 pEvtRec->u.IOPortRead.Port,
14250 pEvtRec->u.IOPortRead.cbValue);
14251 break;
14252 case IEMVERIFYEVENT_IOPORT_WRITE:
14253 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
14254 pEvtRec->u.IOPortWrite.Port,
14255 pEvtRec->u.IOPortWrite.cbValue,
14256 pEvtRec->u.IOPortWrite.u32Value);
14257 break;
14258 case IEMVERIFYEVENT_IOPORT_STR_READ:
14259 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
14260 pEvtRec->u.IOPortStrRead.Port,
14261 pEvtRec->u.IOPortStrRead.cbValue,
14262 pEvtRec->u.IOPortStrRead.cTransfers);
14263 break;
14264 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14265 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
14266 pEvtRec->u.IOPortStrWrite.Port,
14267 pEvtRec->u.IOPortStrWrite.cbValue,
14268 pEvtRec->u.IOPortStrWrite.cTransfers);
14269 break;
14270 case IEMVERIFYEVENT_RAM_READ:
14271 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
14272 pEvtRec->u.RamRead.GCPhys,
14273 pEvtRec->u.RamRead.cb);
14274 break;
14275 case IEMVERIFYEVENT_RAM_WRITE:
14276 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
14277 pEvtRec->u.RamWrite.GCPhys,
14278 pEvtRec->u.RamWrite.cb,
14279 (int)pEvtRec->u.RamWrite.cb,
14280 pEvtRec->u.RamWrite.ab);
14281 break;
14282 default:
14283 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
14284 break;
14285 }
14286}
14287
14288
14289/**
14290 * Raises an assertion on the specified records, showing the given message with
14291 * both record dumps attached.
14292 *
14293 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14294 * @param pEvtRec1 The first record.
14295 * @param pEvtRec2 The second record.
14296 * @param pszMsg The message explaining why we're asserting.
14297 */
14298IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
14299{
14300 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14301 iemVerifyAssertAddRecordDump(pEvtRec1);
14302 iemVerifyAssertAddRecordDump(pEvtRec2);
14303 iemVerifyAssertMsg2(pVCpu);
14304 RTAssertPanic();
14305}
14306
14307
14308/**
14309 * Raises an assertion on the specified record, showing the given message with
14310 * a record dump attached.
14311 *
14312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14313 * @param pEvtRec The record to dump.
14314 * @param pszMsg The message explaining why we're asserting.
14315 */
14316IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
14317{
14318 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14319 iemVerifyAssertAddRecordDump(pEvtRec);
14320 iemVerifyAssertMsg2(pVCpu);
14321 RTAssertPanic();
14322}
14323
14324
14325/**
14326 * Verifies a write record.
14327 *
14328 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14329 * @param pEvtRec The write record.
14330 * @param fRem Set if REM was doing the other execution. If clear,
14331 * it was HM.
14332 */
14333IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
14334{
14335 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
14336 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
14337 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
14338 if ( RT_FAILURE(rc)
14339 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
14340 {
14341 /* fend off ins */
14342 if ( !pVCpu->iem.s.cIOReads
14343 || pEvtRec->u.RamWrite.ab[0] != 0xcc
14344 || ( pEvtRec->u.RamWrite.cb != 1
14345 && pEvtRec->u.RamWrite.cb != 2
14346 && pEvtRec->u.RamWrite.cb != 4) )
14347 {
14348 /* fend off ROMs and MMIO */
14349 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
14350 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
14351 {
14352 /* fend off fxsave */
14353 if (pEvtRec->u.RamWrite.cb != 512)
14354 {
14355 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
14356 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14357 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
14358 RTAssertMsg2Add("%s: %.*Rhxs\n"
14359 "iem: %.*Rhxs\n",
14360 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
14361 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
14362 iemVerifyAssertAddRecordDump(pEvtRec);
14363 iemVerifyAssertMsg2(pVCpu);
14364 RTAssertPanic();
14365 }
14366 }
14367 }
14368 }
14369
14370}
14371
14372/**
14373 * Performs the post-execution verification checks.
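 *
 * @returns Normally rcStrictIem; for unimplemented instructions the status of the
 *          REM/HM run is propagated instead.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   rcStrictIem The status code IEM returned for the instruction just executed.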
14374 */
14375IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
14376{
14377 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14378 return rcStrictIem;
14379
14380 /*
14381 * Switch back the state.
14382 */
14383 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
14384 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
14385 Assert(pOrgCtx != pDebugCtx);
14386 IEM_GET_CTX(pVCpu) = pOrgCtx;
14387
14388 /*
14389 * Execute the instruction in REM.
14390 */
14391 bool fRem = false;
14392 PVM pVM = pVCpu->CTX_SUFF(pVM);
14394 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
14395#ifdef IEM_VERIFICATION_MODE_FULL_HM
14396 if ( HMIsEnabled(pVM)
14397 && pVCpu->iem.s.cIOReads == 0
14398 && pVCpu->iem.s.cIOWrites == 0
14399 && !pVCpu->iem.s.fProblematicMemory)
14400 {
14401 uint64_t uStartRip = pOrgCtx->rip;
14402 unsigned iLoops = 0;
14403 do
14404 {
14405 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
14406 iLoops++;
14407 } while ( rc == VINF_SUCCESS
14408 || ( rc == VINF_EM_DBG_STEPPED
14409 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14410 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
14411 || ( pOrgCtx->rip != pDebugCtx->rip
14412 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
14413 && iLoops < 8) );
14414 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
14415 rc = VINF_SUCCESS;
14416 }
14417#endif
14418 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
14419 || rc == VINF_IOM_R3_IOPORT_READ
14420 || rc == VINF_IOM_R3_IOPORT_WRITE
14421 || rc == VINF_IOM_R3_MMIO_READ
14422 || rc == VINF_IOM_R3_MMIO_READ_WRITE
14423 || rc == VINF_IOM_R3_MMIO_WRITE
14424 || rc == VINF_CPUM_R3_MSR_READ
14425 || rc == VINF_CPUM_R3_MSR_WRITE
14426 || rc == VINF_EM_RESCHEDULE
14427 )
14428 {
14429 EMRemLock(pVM);
14430 rc = REMR3EmulateInstruction(pVM, pVCpu);
14431 AssertRC(rc);
14432 EMRemUnlock(pVM);
14433 fRem = true;
14434 }
14435
14436# if 1 /* Skip unimplemented instructions for now. */
14437 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14438 {
14439 IEM_GET_CTX(pVCpu) = pOrgCtx;
14440 if (rc == VINF_EM_DBG_STEPPED)
14441 return VINF_SUCCESS;
14442 return rc;
14443 }
14444# endif
14445
14446 /*
14447 * Compare the register states.
14448 */
14449 unsigned cDiffs = 0;
14450 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
14451 {
14452 //Log(("REM and IEM ends up with different registers!\n"));
14453 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
14454
14455# define CHECK_FIELD(a_Field) \
14456 do \
14457 { \
14458 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14459 { \
14460 switch (sizeof(pOrgCtx->a_Field)) \
14461 { \
14462 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14463 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14464 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14465 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14466 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14467 } \
14468 cDiffs++; \
14469 } \
14470 } while (0)
14471# define CHECK_XSTATE_FIELD(a_Field) \
14472 do \
14473 { \
14474 if (pOrgXState->a_Field != pDebugXState->a_Field) \
14475 { \
14476 switch (sizeof(pOrgXState->a_Field)) \
14477 { \
14478 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14479 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14480 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14481 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14482 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14483 } \
14484 cDiffs++; \
14485 } \
14486 } while (0)
14487
14488# define CHECK_BIT_FIELD(a_Field) \
14489 do \
14490 { \
14491 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14492 { \
14493 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
14494 cDiffs++; \
14495 } \
14496 } while (0)
14497
14498# define CHECK_SEL(a_Sel) \
14499 do \
14500 { \
14501 CHECK_FIELD(a_Sel.Sel); \
14502 CHECK_FIELD(a_Sel.Attr.u); \
14503 CHECK_FIELD(a_Sel.u64Base); \
14504 CHECK_FIELD(a_Sel.u32Limit); \
14505 CHECK_FIELD(a_Sel.fFlags); \
14506 } while (0)
14507
14508 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
14509 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
14510
14511#if 1 /* The recompiler doesn't update these the intel way. */
14512 if (fRem)
14513 {
14514 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
14515 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
14516 pOrgXState->x87.CS = pDebugXState->x87.CS;
14517 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
14518 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
14519 pOrgXState->x87.DS = pDebugXState->x87.DS;
14520 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
14521 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
14522 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
14523 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
14524 }
14525#endif
14526 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
14527 {
14528 RTAssertMsg2Weak(" the FPU state differs\n");
14529 cDiffs++;
14530 CHECK_XSTATE_FIELD(x87.FCW);
14531 CHECK_XSTATE_FIELD(x87.FSW);
14532 CHECK_XSTATE_FIELD(x87.FTW);
14533 CHECK_XSTATE_FIELD(x87.FOP);
14534 CHECK_XSTATE_FIELD(x87.FPUIP);
14535 CHECK_XSTATE_FIELD(x87.CS);
14536 CHECK_XSTATE_FIELD(x87.Rsrvd1);
14537 CHECK_XSTATE_FIELD(x87.FPUDP);
14538 CHECK_XSTATE_FIELD(x87.DS);
14539 CHECK_XSTATE_FIELD(x87.Rsrvd2);
14540 CHECK_XSTATE_FIELD(x87.MXCSR);
14541 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
14542 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
14543 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
14544 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
14545 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
14546 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
14547 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
14548 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
14549 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
14550 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
14551 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
14552 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
14553 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
14554 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
14555 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
14556 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
14557 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
14558 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
14559 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
14560 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
14561 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
14562 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
14563 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
14564 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
14565 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
14566 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
14567 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
14568 }
14569 CHECK_FIELD(rip);
14570 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
14571 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
14572 {
14573 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
14574 CHECK_BIT_FIELD(rflags.Bits.u1CF);
14575 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
14576 CHECK_BIT_FIELD(rflags.Bits.u1PF);
14577 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
14578 CHECK_BIT_FIELD(rflags.Bits.u1AF);
14579 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
14580 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
14581 CHECK_BIT_FIELD(rflags.Bits.u1SF);
14582 CHECK_BIT_FIELD(rflags.Bits.u1TF);
14583 CHECK_BIT_FIELD(rflags.Bits.u1IF);
14584 CHECK_BIT_FIELD(rflags.Bits.u1DF);
14585 CHECK_BIT_FIELD(rflags.Bits.u1OF);
14586 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
14587 CHECK_BIT_FIELD(rflags.Bits.u1NT);
14588 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
14589 if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
14590 CHECK_BIT_FIELD(rflags.Bits.u1RF);
14591 CHECK_BIT_FIELD(rflags.Bits.u1VM);
14592 CHECK_BIT_FIELD(rflags.Bits.u1AC);
14593 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
14594 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
14595 CHECK_BIT_FIELD(rflags.Bits.u1ID);
14596 }
14597
14598 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
14599 CHECK_FIELD(rax);
14600 CHECK_FIELD(rcx);
14601 if (!pVCpu->iem.s.fIgnoreRaxRdx)
14602 CHECK_FIELD(rdx);
14603 CHECK_FIELD(rbx);
14604 CHECK_FIELD(rsp);
14605 CHECK_FIELD(rbp);
14606 CHECK_FIELD(rsi);
14607 CHECK_FIELD(rdi);
14608 CHECK_FIELD(r8);
14609 CHECK_FIELD(r9);
14610 CHECK_FIELD(r10);
14611 CHECK_FIELD(r11);
14612 CHECK_FIELD(r12);
14613 CHECK_FIELD(r13);
14614 CHECK_SEL(cs);
14615 CHECK_SEL(ss);
14616 CHECK_SEL(ds);
14617 CHECK_SEL(es);
14618 CHECK_SEL(fs);
14619 CHECK_SEL(gs);
14620 CHECK_FIELD(cr0);
14621
14622 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
14623 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
14624 /* Kludge #2: CR2 differs slightly on cross page boundary faults, we report the last address of the access
14625 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
14626 if (pOrgCtx->cr2 != pDebugCtx->cr2)
14627 {
14628 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
14629 { /* ignore */ }
14630 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
14631 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
14632 && fRem)
14633 { /* ignore */ }
14634 else
14635 CHECK_FIELD(cr2);
14636 }
14637 CHECK_FIELD(cr3);
14638 CHECK_FIELD(cr4);
14639 CHECK_FIELD(dr[0]);
14640 CHECK_FIELD(dr[1]);
14641 CHECK_FIELD(dr[2]);
14642 CHECK_FIELD(dr[3]);
14643 CHECK_FIELD(dr[6]);
14644 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
14645 CHECK_FIELD(dr[7]);
14646 CHECK_FIELD(gdtr.cbGdt);
14647 CHECK_FIELD(gdtr.pGdt);
14648 CHECK_FIELD(idtr.cbIdt);
14649 CHECK_FIELD(idtr.pIdt);
14650 CHECK_SEL(ldtr);
14651 CHECK_SEL(tr);
14652 CHECK_FIELD(SysEnter.cs);
14653 CHECK_FIELD(SysEnter.eip);
14654 CHECK_FIELD(SysEnter.esp);
14655 CHECK_FIELD(msrEFER);
14656 CHECK_FIELD(msrSTAR);
14657 CHECK_FIELD(msrPAT);
14658 CHECK_FIELD(msrLSTAR);
14659 CHECK_FIELD(msrCSTAR);
14660 CHECK_FIELD(msrSFMASK);
14661 CHECK_FIELD(msrKERNELGSBASE);
14662
14663 if (cDiffs != 0)
14664 {
14665 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14666 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
14667 RTAssertPanic();
14668 static bool volatile s_fEnterDebugger = true;
14669 if (s_fEnterDebugger)
14670 DBGFSTOP(pVM);
14671
14672# if 1 /* Ignore unimplemented instructions for now. */
14673 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14674 rcStrictIem = VINF_SUCCESS;
14675# endif
14676 }
14677# undef CHECK_FIELD
14678# undef CHECK_BIT_FIELD
14679 }
14680
14681 /*
14682 * If the register state compared fine, check the verification event
14683 * records.
14684 */
14685 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
14686 {
14687 /*
14688 * Compare verification event records.
14689 * - I/O port accesses should be a 1:1 match.
14690 */
14691 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
14692 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
14693 while (pIemRec && pOtherRec)
14694 {
14695 /* Since we might miss RAM writes and reads, ignore reads and check
14696 that any written memory matches, skipping extra IEM-only RAM records. */
14697 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
14698 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
14699 && pIemRec->pNext)
14700 {
14701 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14702 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14703 pIemRec = pIemRec->pNext;
14704 }
14705
14706 /* Do the compare. */
14707 if (pIemRec->enmEvent != pOtherRec->enmEvent)
14708 {
14709 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
14710 break;
14711 }
14712 bool fEquals;
14713 switch (pIemRec->enmEvent)
14714 {
14715 case IEMVERIFYEVENT_IOPORT_READ:
14716 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
14717 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
14718 break;
14719 case IEMVERIFYEVENT_IOPORT_WRITE:
14720 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
14721 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
14722 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
14723 break;
14724 case IEMVERIFYEVENT_IOPORT_STR_READ:
14725 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
14726 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
14727 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
14728 break;
14729 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14730 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
14731 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
14732 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
14733 break;
14734 case IEMVERIFYEVENT_RAM_READ:
14735 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
14736 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
14737 break;
14738 case IEMVERIFYEVENT_RAM_WRITE:
14739 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
14740 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
14741 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
14742 break;
14743 default:
14744 fEquals = false;
14745 break;
14746 }
14747 if (!fEquals)
14748 {
14749 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
14750 break;
14751 }
14752
14753 /* advance */
14754 pIemRec = pIemRec->pNext;
14755 pOtherRec = pOtherRec->pNext;
14756 }
14757
14758 /* Ignore extra writes and reads. */
14759 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
14760 {
14761 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14762 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14763 pIemRec = pIemRec->pNext;
14764 }
14765 if (pIemRec != NULL)
14766 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
14767 else if (pOtherRec != NULL)
14768 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
14769 }
14770 IEM_GET_CTX(pVCpu) = pOrgCtx;
14771
14772 return rcStrictIem;
14773}
14774
14775#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14776
14777/* stubs */
14778IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14779{
14780 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
14781 return VERR_INTERNAL_ERROR;
14782}
14783
14784IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14785{
14786 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
14787 return VERR_INTERNAL_ERROR;
14788}
14789
14790#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14791
14792
14793#ifdef LOG_ENABLED
14794/**
14795 * Logs the current instruction.
14796 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14797 * @param pCtx The current CPU context.
14798 * @param fSameCtx Set if we have the same context information as the VMM,
14799 * clear if we may have already executed an instruction in
14800 * our debug context. When clear, we assume IEMCPU holds
14801 * valid CPU mode info.
14802 */
14803IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
14804{
14805# ifdef IN_RING3
14806 if (LogIs2Enabled())
14807 {
14808 char szInstr[256];
14809 uint32_t cbInstr = 0;
14810 if (fSameCtx)
14811 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
14812 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14813 szInstr, sizeof(szInstr), &cbInstr);
14814 else
14815 {
14816 uint32_t fFlags = 0;
14817 switch (pVCpu->iem.s.enmCpuMode)
14818 {
14819 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
14820 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
14821 case IEMMODE_16BIT:
14822 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
14823 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
14824 else
14825 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
14826 break;
14827 }
14828 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
14829 szInstr, sizeof(szInstr), &cbInstr);
14830 }
14831
14832 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
14833 Log2(("****\n"
14834 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
14835 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
14836 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
14837 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
14838 " %s\n"
14839 ,
14840 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
14841 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
14842 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
14843 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
14844 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
14845 szInstr));
14846
14847 if (LogIs3Enabled())
14848 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14849 }
14850 else
14851# endif
14852 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
14853 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
14854 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
14855}
14856#endif
14857
14858
14859/**
14860 * Makes status code adjustments (pass up from I/O and access handler)
14861 * as well as maintaining statistics.
14862 *
14863 * @returns Strict VBox status code to pass up.
14864 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14865 * @param rcStrict The status from executing an instruction.
14866 */
14867DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14868{
14869 if (rcStrict != VINF_SUCCESS)
14870 {
14871 if (RT_SUCCESS(rcStrict))
14872 {
14873 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
14874 || rcStrict == VINF_IOM_R3_IOPORT_READ
14875 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
14876 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
14877 || rcStrict == VINF_IOM_R3_MMIO_READ
14878 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
14879 || rcStrict == VINF_IOM_R3_MMIO_WRITE
14880 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
14881 || rcStrict == VINF_CPUM_R3_MSR_READ
14882 || rcStrict == VINF_CPUM_R3_MSR_WRITE
14883 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
14884 || rcStrict == VINF_EM_RAW_TO_R3
14885 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
14886 || rcStrict == VINF_EM_TRIPLE_FAULT
14887 /* raw-mode / virt handlers only: */
14888 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
14889 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
14890 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
14891 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
14892 || rcStrict == VINF_SELM_SYNC_GDT
14893 || rcStrict == VINF_CSAM_PENDING_ACTION
14894 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
14895 /* nested hw.virt codes: */
14896 || rcStrict == VINF_SVM_VMEXIT
14897 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
14898/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
14899 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
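            /* Note (added): the pending pass-up status replaces the current one when it lies
               outside the VINF_EM range or ranks higher (i.e. has a numerically lower value). */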
14900#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14901 if ( rcStrict == VINF_SVM_VMEXIT
14902 && rcPassUp == VINF_SUCCESS)
14903 rcStrict = VINF_SUCCESS;
14904 else
14905#endif
14906 if (rcPassUp == VINF_SUCCESS)
14907 pVCpu->iem.s.cRetInfStatuses++;
14908 else if ( rcPassUp < VINF_EM_FIRST
14909 || rcPassUp > VINF_EM_LAST
14910 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
14911 {
14912 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14913 pVCpu->iem.s.cRetPassUpStatus++;
14914 rcStrict = rcPassUp;
14915 }
14916 else
14917 {
14918 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14919 pVCpu->iem.s.cRetInfStatuses++;
14920 }
14921 }
14922 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
14923 pVCpu->iem.s.cRetAspectNotImplemented++;
14924 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14925 pVCpu->iem.s.cRetInstrNotImplemented++;
14926#ifdef IEM_VERIFICATION_MODE_FULL
14927 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
14928 rcStrict = VINF_SUCCESS;
14929#endif
14930 else
14931 pVCpu->iem.s.cRetErrStatuses++;
14932 }
14933 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
14934 {
14935 pVCpu->iem.s.cRetPassUpStatus++;
14936 rcStrict = pVCpu->iem.s.rcPassUp;
14937 }
14938
14939 return rcStrict;
14940}
14941
14942
14943/**
14944 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
14945 * IEMExecOneWithPrefetchedByPC.
14946 *
14947 * Similar code is found in IEMExecLots.
14948 *
14949 * @return Strict VBox status code.
14950 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14952 * @param fExecuteInhibit If set, execute the instruction following CLI,
14953 * POP SS and MOV SS,GR.
14954 */
14955DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
14956{
14957#ifdef IEM_WITH_SETJMP
14958 VBOXSTRICTRC rcStrict;
14959 jmp_buf JmpBuf;
14960 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14961 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14962 if ((rcStrict = setjmp(JmpBuf)) == 0)
14963 {
14964 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14965 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14966 }
14967 else
14968 pVCpu->iem.s.cLongJumps++;
14969 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14970#else
14971 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14972 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14973#endif
14974 if (rcStrict == VINF_SUCCESS)
14975 pVCpu->iem.s.cInstructions++;
14976 if (pVCpu->iem.s.cActiveMappings > 0)
14977 {
14978 Assert(rcStrict != VINF_SUCCESS);
14979 iemMemRollback(pVCpu);
14980 }
14981//#ifdef DEBUG
14982// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14983//#endif
14984
14985 /* Execute the next instruction as well if a cli, pop ss or
14986 mov ss, Gr has just completed successfully. */
14987 if ( fExecuteInhibit
14988 && rcStrict == VINF_SUCCESS
14989 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14990 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
14991 {
14992 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14993 if (rcStrict == VINF_SUCCESS)
14994 {
14995#ifdef LOG_ENABLED
14996 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
14997#endif
14998#ifdef IEM_WITH_SETJMP
14999 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
15000 if ((rcStrict = setjmp(JmpBuf)) == 0)
15001 {
15002 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
15003 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
15004 }
15005 else
15006 pVCpu->iem.s.cLongJumps++;
15007 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
15008#else
15009 IEM_OPCODE_GET_NEXT_U8(&b);
15010 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
15011#endif
15012 if (rcStrict == VINF_SUCCESS)
15013 pVCpu->iem.s.cInstructions++;
15014 if (pVCpu->iem.s.cActiveMappings > 0)
15015 {
15016 Assert(rcStrict != VINF_SUCCESS);
15017 iemMemRollback(pVCpu);
15018 }
15019 }
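    /* Setting the inhibit PC to a dummy address means the RIP comparison above
       won't match again, effectively clearing the inhibition. */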
15020 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
15021 }
15022
15023 /*
15024 * Return value fiddling, statistics and sanity assertions.
15025 */
15026 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15027
15028 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
15029 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
15030#if defined(IEM_VERIFICATION_MODE_FULL)
15031 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
15032 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
15033 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
15034 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
15035#endif
15036 return rcStrict;
15037}
15038
15039
15040#ifdef IN_RC
15041/**
15042 * Re-enters raw-mode or ensures we return to ring-3.
15043 *
15044 * @returns rcStrict, maybe modified.
15045 * @param pVCpu The cross context virtual CPU structure of the calling thread.
15046 * @param pCtx The current CPU context.
15047 * @param rcStrict The status code returned by the interpreter.
15048 */
15049DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
15050{
15051 if ( !pVCpu->iem.s.fInPatchCode
15052 && ( rcStrict == VINF_SUCCESS
15053 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
15054 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
15055 {
15056 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
15057 CPUMRawEnter(pVCpu);
15058 else
15059 {
15060 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
15061 rcStrict = VINF_EM_RESCHEDULE;
15062 }
15063 }
15064 return rcStrict;
15065}
15066#endif
15067
15068
15069/**
15070 * Execute one instruction.
15071 *
15072 * @return Strict VBox status code.
15073 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15074 */
15075VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
15076{
15077#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15078 if (++pVCpu->iem.s.cVerifyDepth == 1)
15079 iemExecVerificationModeSetup(pVCpu);
15080#endif
15081#ifdef LOG_ENABLED
15082 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15083 iemLogCurInstr(pVCpu, pCtx, true);
15084#endif
15085
15086 /*
15087 * Do the decoding and emulation.
15088 */
15089 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15090 if (rcStrict == VINF_SUCCESS)
15091 rcStrict = iemExecOneInner(pVCpu, true);
15092
15093#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15094 /*
15095 * Assert some sanity.
15096 */
15097 if (pVCpu->iem.s.cVerifyDepth == 1)
15098 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
15099 pVCpu->iem.s.cVerifyDepth--;
15100#endif
15101#ifdef IN_RC
15102 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15103#endif
15104 if (rcStrict != VINF_SUCCESS)
15105 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15106 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15107 return rcStrict;
15108}
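
#if 0 /* Illustrative sketch, not part of the original file: a minimal EMT-side caller
         loop (iemSketchRunABit is hypothetical).  EM's real execution loop also does
         scheduling, force-flag processing and so on around each call. */
static VBOXSTRICTRC iemSketchRunABit(PVMCPU pVCpu, uint32_t cMaxInstructions)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    while (cMaxInstructions-- > 0)
    {
        rcStrict = IEMExecOne(pVCpu);
        if (rcStrict != VINF_SUCCESS)   /* stop on the first informational or error status */
            break;
    }
    return rcStrict;
}
#endif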
15109
15110
15111VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
15112{
15113 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15114 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15115
15116 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15117 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15118 if (rcStrict == VINF_SUCCESS)
15119 {
15120 rcStrict = iemExecOneInner(pVCpu, true);
15121 if (pcbWritten)
15122 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15123 }
15124
15125#ifdef IN_RC
15126 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15127#endif
15128 return rcStrict;
15129}
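
#if 0 /* Illustrative sketch, not part of the original file: typical use of the
         pcbWritten out parameter (iemSketchExecAndCheckWrite is hypothetical).
         The delta tells the caller whether the emulated instruction stored anything. */
static VBOXSTRICTRC iemSketchExecAndCheckWrite(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, bool *pfWroteSomething)
{
    uint32_t cbWritten = 0;
    VBOXSTRICTRC rcStrict = IEMExecOneEx(pVCpu, pCtxCore, &cbWritten);
    *pfWroteSomething = cbWritten > 0;
    return rcStrict;
}
#endif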
15130
15131
15132VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15133 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
15134{
15135 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15136 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15137
15138 VBOXSTRICTRC rcStrict;
15139 if ( cbOpcodeBytes
15140 && pCtx->rip == OpcodeBytesPC)
15141 {
15142 iemInitDecoder(pVCpu, false);
15143#ifdef IEM_WITH_CODE_TLB
15144 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15145 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15146 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15147 pVCpu->iem.s.offCurInstrStart = 0;
15148 pVCpu->iem.s.offInstrNextByte = 0;
15149#else
15150 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15151 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15152#endif
15153 rcStrict = VINF_SUCCESS;
15154 }
15155 else
15156 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15157 if (rcStrict == VINF_SUCCESS)
15158 {
15159 rcStrict = iemExecOneInner(pVCpu, true);
15160 }
15161
15162#ifdef IN_RC
15163 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15164#endif
15165 return rcStrict;
15166}
15167
15168
15169VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
15170{
15171 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15172 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15173
15174 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15175 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15176 if (rcStrict == VINF_SUCCESS)
15177 {
15178 rcStrict = iemExecOneInner(pVCpu, false);
15179 if (pcbWritten)
15180 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15181 }
15182
15183#ifdef IN_RC
15184 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15185#endif
15186 return rcStrict;
15187}
15188
15189
15190VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15191 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
15192{
15193 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15194 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15195
15196 VBOXSTRICTRC rcStrict;
15197 if ( cbOpcodeBytes
15198 && pCtx->rip == OpcodeBytesPC)
15199 {
15200 iemInitDecoder(pVCpu, true);
15201#ifdef IEM_WITH_CODE_TLB
15202 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15203 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15204 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15205 pVCpu->iem.s.offCurInstrStart = 0;
15206 pVCpu->iem.s.offInstrNextByte = 0;
15207#else
15208 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15209 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15210#endif
15211 rcStrict = VINF_SUCCESS;
15212 }
15213 else
15214 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15215 if (rcStrict == VINF_SUCCESS)
15216 rcStrict = iemExecOneInner(pVCpu, false);
15217
15218#ifdef IN_RC
15219 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15220#endif
15221 return rcStrict;
15222}
15223
15224
15225/**
15226 * For debugging DISGetParamSize, may come in handy.
15227 *
15228 * @returns Strict VBox status code.
15229 * @param pVCpu The cross context virtual CPU structure of the
15230 * calling EMT.
15231 * @param pCtxCore The context core structure.
15232 * @param OpcodeBytesPC The PC of the opcode bytes.
15233 * @param pvOpcodeBytes Prefetched opcode bytes.
15234 * @param cbOpcodeBytes Number of prefetched bytes.
15235 * @param pcbWritten Where to return the number of bytes written.
15236 * Optional.
15237 */
15238VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15239 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
15240 uint32_t *pcbWritten)
15241{
15242 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15243 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15244
15245 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15246 VBOXSTRICTRC rcStrict;
15247 if ( cbOpcodeBytes
15248 && pCtx->rip == OpcodeBytesPC)
15249 {
15250 iemInitDecoder(pVCpu, true);
15251#ifdef IEM_WITH_CODE_TLB
15252 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15253 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15254 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15255 pVCpu->iem.s.offCurInstrStart = 0;
15256 pVCpu->iem.s.offInstrNextByte = 0;
15257#else
15258 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15259 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15260#endif
15261 rcStrict = VINF_SUCCESS;
15262 }
15263 else
15264 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15265 if (rcStrict == VINF_SUCCESS)
15266 {
15267 rcStrict = iemExecOneInner(pVCpu, false);
15268 if (pcbWritten)
15269 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15270 }
15271
15272#ifdef IN_RC
15273 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15274#endif
15275 return rcStrict;
15276}
15277
15278
15279VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
15280{
15281 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
15282
15283#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15284 /*
15285 * See if there is an interrupt pending in TRPM, inject it if we can.
15286 */
15287 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15288# ifdef IEM_VERIFICATION_MODE_FULL
15289 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15290# endif
15291
15292 /** @todo Maybe someday we can centralize this under CPUMCanInjectInterrupt()? */
15293# if defined(VBOX_WITH_NESTED_HWVIRT_SVM)
15294 bool fIntrEnabled = pCtx->hwvirt.fGif;
15295 if (fIntrEnabled)
15296 {
15297 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
15298 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);
15299 else
15300 fIntrEnabled = pCtx->eflags.Bits.u1IF;
15301 }
15302# else
15303 bool fIntrEnabled = pCtx->eflags.Bits.u1IF;
15304# endif
15305 if ( fIntrEnabled
15306 && TRPMHasTrap(pVCpu)
15307 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15308 {
15309 uint8_t u8TrapNo;
15310 TRPMEVENT enmType;
15311 RTGCUINT uErrCode;
15312 RTGCPTR uCr2;
15313 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15314 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15315 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15316 TRPMResetTrap(pVCpu);
15317 }
15318
15319 /*
15320 * Log the state.
15321 */
15322# ifdef LOG_ENABLED
15323 iemLogCurInstr(pVCpu, pCtx, true);
15324# endif
15325
15326 /*
15327 * Do the decoding and emulation.
15328 */
15329 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15330 if (rcStrict == VINF_SUCCESS)
15331 rcStrict = iemExecOneInner(pVCpu, true);
15332
15333 /*
15334 * Assert some sanity.
15335 */
15336 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
15337
15338 /*
15339 * Log and return.
15340 */
15341 if (rcStrict != VINF_SUCCESS)
15342 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15343 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15344 if (pcInstructions)
15345 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15346 return rcStrict;
15347
15348#else /* Not verification mode */
15349
15350 /*
15351 * See if there is an interrupt pending in TRPM, inject it if we can.
15352 */
15353 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15354# ifdef IEM_VERIFICATION_MODE_FULL
15355 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15356# endif
15357
15358 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
15359# if defined(VBOX_WITH_NESTED_HWVIRT_SVM)
15360 bool fIntrEnabled = pCtx->hwvirt.fGif;
15361 if (fIntrEnabled)
15362 {
15363 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
15364 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);
15365 else
15366 fIntrEnabled = pCtx->eflags.Bits.u1IF;
15367 }
15368# else
15369 bool fIntrEnabled = pCtx->eflags.Bits.u1IF;
15370# endif
15371 if ( fIntrEnabled
15372 && TRPMHasTrap(pVCpu)
15373 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15374 {
15375 uint8_t u8TrapNo;
15376 TRPMEVENT enmType;
15377 RTGCUINT uErrCode;
15378 RTGCPTR uCr2;
15379 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15380 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15381 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15382 TRPMResetTrap(pVCpu);
15383 }
15384
15385 /*
15386 * Initial decoder init w/ prefetch, then setup setjmp.
15387 */
15388 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15389 if (rcStrict == VINF_SUCCESS)
15390 {
15391# ifdef IEM_WITH_SETJMP
15392 jmp_buf JmpBuf;
15393 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
15394 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
15395 pVCpu->iem.s.cActiveMappings = 0;
15396 if ((rcStrict = setjmp(JmpBuf)) == 0)
15397# endif
15398 {
15399 /*
15400 * The run loop. We limit ourselves to 4096 instructions right now.
15401 */
15402 PVM pVM = pVCpu->CTX_SUFF(pVM);
15403 uint32_t cInstr = 4096;
15404 for (;;)
15405 {
15406 /*
15407 * Log the state.
15408 */
15409# ifdef LOG_ENABLED
15410 iemLogCurInstr(pVCpu, pCtx, true);
15411# endif
15412
15413 /*
15414 * Do the decoding and emulation.
15415 */
15416 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
15417 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
15418 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15419 {
15420 Assert(pVCpu->iem.s.cActiveMappings == 0);
15421 pVCpu->iem.s.cInstructions++;
15422 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
15423 {
15424 uint32_t fCpu = pVCpu->fLocalForcedActions
15425 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
15426 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
15427 | VMCPU_FF_TLB_FLUSH
15428# ifdef VBOX_WITH_RAW_MODE
15429 | VMCPU_FF_TRPM_SYNC_IDT
15430 | VMCPU_FF_SELM_SYNC_TSS
15431 | VMCPU_FF_SELM_SYNC_GDT
15432 | VMCPU_FF_SELM_SYNC_LDT
15433# endif
15434 | VMCPU_FF_INHIBIT_INTERRUPTS
15435 | VMCPU_FF_BLOCK_NMIS
15436 | VMCPU_FF_UNHALT ));
15437
15438 if (RT_LIKELY( ( !fCpu
15439 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
15440 && !pCtx->rflags.Bits.u1IF) )
15441 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
15442 {
15443 if (cInstr-- > 0)
15444 {
15445 Assert(pVCpu->iem.s.cActiveMappings == 0);
15446 iemReInitDecoder(pVCpu);
15447 continue;
15448 }
15449 }
15450 }
15451 Assert(pVCpu->iem.s.cActiveMappings == 0);
15452 }
15453 else if (pVCpu->iem.s.cActiveMappings > 0)
15454 iemMemRollback(pVCpu);
15455 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15456 break;
15457 }
15458 }
15459# ifdef IEM_WITH_SETJMP
15460 else
15461 {
15462 if (pVCpu->iem.s.cActiveMappings > 0)
15463 iemMemRollback(pVCpu);
15464 pVCpu->iem.s.cLongJumps++;
15465 }
15466 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
15467# endif
15468
15469 /*
15470 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
15471 */
15472 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
15473 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
15474# if defined(IEM_VERIFICATION_MODE_FULL)
15475 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
15476 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
15477 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
15478 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
15479# endif
15480 }
15481# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15482 else
15483 {
15484 /*
15485 * When a nested-guest causes an exception intercept (e.g. #PF) while fetching
15486 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
15487 */
15488 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15489 }
15490# endif
15491
15492 /*
15493 * Maybe re-enter raw-mode and log.
15494 */
15495# ifdef IN_RC
15496 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15497# endif
15498 if (rcStrict != VINF_SUCCESS)
15499 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15500 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15501 if (pcInstructions)
15502 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15503 return rcStrict;
15504#endif /* Not verification mode */
15505}
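
#if 0 /* Illustrative sketch, not part of the original file: minimal caller of the
         batched interface (iemSketchExecLots and the statistics pointer are
         hypothetical); pcInstructions may also simply be passed as NULL. */
static VBOXSTRICTRC iemSketchExecLots(PVMCPU pVCpu, uint64_t *pcTotalInstructions)
{
    uint32_t cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
    *pcTotalInstructions += cInstructions;      /* e.g. feed an external statistics counter */
    return rcStrict;
}
#endif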
15506
15507
15508
15509/**
15510 * Injects a trap, fault, abort, software interrupt or external interrupt.
15511 *
15512 * The parameter list matches TRPMQueryTrapAll pretty closely.
15513 *
15514 * @returns Strict VBox status code.
15515 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15516 * @param u8TrapNo The trap number.
15517 * @param enmType What type is it (trap/fault/abort), software
15518 * interrupt or hardware interrupt.
15519 * @param uErrCode The error code if applicable.
15520 * @param uCr2 The CR2 value if applicable.
15521 * @param cbInstr The instruction length (only relevant for
15522 * software interrupts).
15523 */
15524VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
15525 uint8_t cbInstr)
15526{
15527 iemInitDecoder(pVCpu, false);
15528#ifdef DBGFTRACE_ENABLED
15529 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
15530 u8TrapNo, enmType, uErrCode, uCr2);
15531#endif
15532
15533 uint32_t fFlags;
15534 switch (enmType)
15535 {
15536 case TRPM_HARDWARE_INT:
15537 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
15538 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
15539 uErrCode = uCr2 = 0;
15540 break;
15541
15542 case TRPM_SOFTWARE_INT:
15543 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
15544 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
15545 uErrCode = uCr2 = 0;
15546 break;
15547
15548 case TRPM_TRAP:
15549 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
15550 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
15551 if (u8TrapNo == X86_XCPT_PF)
15552 fFlags |= IEM_XCPT_FLAGS_CR2;
15553 switch (u8TrapNo)
15554 {
15555 case X86_XCPT_DF:
15556 case X86_XCPT_TS:
15557 case X86_XCPT_NP:
15558 case X86_XCPT_SS:
15559 case X86_XCPT_PF:
15560 case X86_XCPT_AC:
15561 fFlags |= IEM_XCPT_FLAGS_ERR;
15562 break;
15563
15564 case X86_XCPT_NMI:
15565 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
15566 break;
15567 }
15568 break;
15569
15570 IEM_NOT_REACHED_DEFAULT_CASE_RET();
15571 }
15572
15573 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
15574}
15575
15576
15577/**
15578 * Injects the active TRPM event.
15579 *
15580 * @returns Strict VBox status code.
15581 * @param pVCpu The cross context virtual CPU structure.
15582 */
15583VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
15584{
15585#ifndef IEM_IMPLEMENTS_TASKSWITCH
15586 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
15587#else
15588 uint8_t u8TrapNo;
15589 TRPMEVENT enmType;
15590 RTGCUINT uErrCode;
15591 RTGCUINTPTR uCr2;
15592 uint8_t cbInstr;
15593 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
15594 if (RT_FAILURE(rc))
15595 return rc;
15596
15597 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
15598
15599 /** @todo Are there any other codes that imply the event was successfully
15600 * delivered to the guest? See @bugref{6607}. */
15601 if ( rcStrict == VINF_SUCCESS
15602 || rcStrict == VINF_IEM_RAISED_XCPT)
15603 {
15604 TRPMResetTrap(pVCpu);
15605 }
15606 return rcStrict;
15607#endif
15608}
15609
15610
15611VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
15612{
15613 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15614 return VERR_NOT_IMPLEMENTED;
15615}
15616
15617
15618VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
15619{
15620 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15621 return VERR_NOT_IMPLEMENTED;
15622}
15623
15624
15625#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
15626/**
15627 * Executes an IRET instruction with default operand size.
15628 *
15629 * This is for PATM.
15630 *
15631 * @returns VBox status code.
15632 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15633 * @param pCtxCore The register frame.
15634 */
15635VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
15636{
15637 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15638
15639 iemCtxCoreToCtx(pCtx, pCtxCore);
15640 iemInitDecoder(pVCpu);
15641 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
15642 if (rcStrict == VINF_SUCCESS)
15643 iemCtxToCtxCore(pCtxCore, pCtx);
15644 else
15645 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15646 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15647 return rcStrict;
15648}
15649#endif
15650
15651
15652/**
15653 * Macro used by the IEMExec* methods to check the given instruction length.
15654 *
15655 * Will return on failure!
15656 *
15657 * @param a_cbInstr The given instruction length.
15658 * @param a_cbMin The minimum length.
15659 */
15660#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
15661 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
15662 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
15663
15664
15665/**
15666 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
15667 *
15668 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
15669 *
15670 * @returns Fiddled strict VBox status code, ready to return to the non-IEM caller.
15671 * @param pVCpu The cross context virtual CPU structure of the calling thread.
15672 * @param rcStrict The status code to fiddle.
15673 */
15674DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15675{
15676 iemUninitExec(pVCpu);
15677#ifdef IN_RC
15678 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
15679 iemExecStatusCodeFiddling(pVCpu, rcStrict));
15680#else
15681 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15682#endif
15683}
15684
15685
15686/**
15687 * Interface for HM and EM for executing string I/O OUT (write) instructions.
15688 *
15689 * This API ASSUMES that the caller has already verified that the guest code is
15690 * allowed to access the I/O port. (The I/O port is in the DX register in the
15691 * guest state.)
15692 *
15693 * @returns Strict VBox status code.
15694 * @param pVCpu The cross context virtual CPU structure.
15695 * @param cbValue The size of the I/O port access (1, 2, or 4).
15696 * @param enmAddrMode The addressing mode.
15697 * @param fRepPrefix Indicates whether a repeat prefix is used
15698 * (doesn't matter which for this instruction).
15699 * @param cbInstr The instruction length in bytes.
15700 * @param iEffSeg The effective segment register number.
15701 * @param fIoChecked Whether the access to the I/O port has been
15702 * checked or not. It's typically checked in the
15703 * HM scenario.
15704 */
15705VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15706 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
15707{
15708 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
15709 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15710
15711 /*
15712 * State init.
15713 */
15714 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15715
15716 /*
15717 * Switch orgy for getting to the right handler.
15718 */
15719 VBOXSTRICTRC rcStrict;
15720 if (fRepPrefix)
15721 {
15722 switch (enmAddrMode)
15723 {
15724 case IEMMODE_16BIT:
15725 switch (cbValue)
15726 {
15727 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15728 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15729 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15730 default:
15731 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15732 }
15733 break;
15734
15735 case IEMMODE_32BIT:
15736 switch (cbValue)
15737 {
15738 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15739 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15740 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15741 default:
15742 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15743 }
15744 break;
15745
15746 case IEMMODE_64BIT:
15747 switch (cbValue)
15748 {
15749 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15750 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15751 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15752 default:
15753 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15754 }
15755 break;
15756
15757 default:
15758 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15759 }
15760 }
15761 else
15762 {
15763 switch (enmAddrMode)
15764 {
15765 case IEMMODE_16BIT:
15766 switch (cbValue)
15767 {
15768 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15769 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15770 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15771 default:
15772 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15773 }
15774 break;
15775
15776 case IEMMODE_32BIT:
15777 switch (cbValue)
15778 {
15779 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15780 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15781 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15782 default:
15783 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15784 }
15785 break;
15786
15787 case IEMMODE_64BIT:
15788 switch (cbValue)
15789 {
15790 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15791 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15792 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15793 default:
15794 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15795 }
15796 break;
15797
15798 default:
15799 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15800 }
15801 }
15802
15803 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15804}
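
#if 0 /* Illustrative sketch, not part of the original file: rough shape of an HM
         I/O-exit handler using the string OUT interface.  The decoded values below
         (operand size, address mode, REP, instruction length, segment) are
         hypothetical examples; in practice they come from the VM-exit information. */
static VBOXSTRICTRC iemSketchHandleOutsExit(PVMCPU pVCpu)
{
    uint8_t const cbValue     = 1;                  /* OUTSB */
    IEMMODE const enmAddrMode = IEMMODE_32BIT;      /* 32-bit addressing */
    bool const    fRepPrefix  = true;               /* REP OUTSB */
    uint8_t const cbInstr     = 2;                  /* REP prefix + OUTSB opcode */
    uint8_t const iEffSeg     = X86_SREG_DS;        /* default segment unless overridden */
    return IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRepPrefix, cbInstr,
                                iEffSeg, true /*fIoChecked*/);
}
#endif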
15805
15806
15807/**
15808 * Interface for HM and EM for executing string I/O IN (read) instructions.
15809 *
15810 * This API ASSUMES that the caller has already verified that the guest code is
15811 * allowed to access the I/O port. (The I/O port is in the DX register in the
15812 * guest state.)
15813 *
15814 * @returns Strict VBox status code.
15815 * @param pVCpu The cross context virtual CPU structure.
15816 * @param cbValue The size of the I/O port access (1, 2, or 4).
15817 * @param enmAddrMode The addressing mode.
15818 * @param fRepPrefix Indicates whether a repeat prefix is used
15819 * (doesn't matter which for this instruction).
15820 * @param cbInstr The instruction length in bytes.
15821 * @param fIoChecked Whether the access to the I/O port has been
15822 * checked or not. It's typically checked in the
15823 * HM scenario.
15824 */
15825VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15826 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15827{
15828 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15829
15830 /*
15831 * State init.
15832 */
15833 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15834
15835 /*
15836 * Switch orgy for getting to the right handler.
15837 */
15838 VBOXSTRICTRC rcStrict;
15839 if (fRepPrefix)
15840 {
15841 switch (enmAddrMode)
15842 {
15843 case IEMMODE_16BIT:
15844 switch (cbValue)
15845 {
15846 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15847 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15848 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15849 default:
15850 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15851 }
15852 break;
15853
15854 case IEMMODE_32BIT:
15855 switch (cbValue)
15856 {
15857 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15858 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15859 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15860 default:
15861 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15862 }
15863 break;
15864
15865 case IEMMODE_64BIT:
15866 switch (cbValue)
15867 {
15868 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15869 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15870 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15871 default:
15872 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15873 }
15874 break;
15875
15876 default:
15877 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15878 }
15879 }
15880 else
15881 {
15882 switch (enmAddrMode)
15883 {
15884 case IEMMODE_16BIT:
15885 switch (cbValue)
15886 {
15887 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15888 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15889 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15890 default:
15891 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15892 }
15893 break;
15894
15895 case IEMMODE_32BIT:
15896 switch (cbValue)
15897 {
15898 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15899 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15900 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15901 default:
15902 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15903 }
15904 break;
15905
15906 case IEMMODE_64BIT:
15907 switch (cbValue)
15908 {
15909 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15910 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15911 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15912 default:
15913 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15914 }
15915 break;
15916
15917 default:
15918 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15919 }
15920 }
15921
15922 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15923}
15924
15925
15926/**
15927 * Interface for raw-mode to execute an OUT instruction.
15928 *
15929 * @returns Strict VBox status code.
15930 * @param pVCpu The cross context virtual CPU structure.
15931 * @param cbInstr The instruction length in bytes.
15932 * @param u16Port The port to write to.
15933 * @param cbReg The register size.
15934 *
15935 * @remarks In ring-0 not all of the state needs to be synced in.
15936 */
15937VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15938{
15939 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15940 Assert(cbReg <= 4 && cbReg != 3);
15941
15942 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15943 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
15944 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15945}
15946
15947
15948/**
15949 * Interface for raw-mode to execute an IN instruction.
15950 *
15951 * @returns Strict VBox status code.
15952 * @param pVCpu The cross context virtual CPU structure.
15953 * @param cbInstr The instruction length in bytes.
15954 * @param u16Port The port to read.
15955 * @param cbReg The register size.
15956 */
15957VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15958{
15959 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15960 Assert(cbReg <= 4 && cbReg != 3);
15961
15962 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15963 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
15964 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15965}
15966
15967
15968/**
15969 * Interface for HM and EM to write to a CRx register.
15970 *
15971 * @returns Strict VBox status code.
15972 * @param pVCpu The cross context virtual CPU structure.
15973 * @param cbInstr The instruction length in bytes.
15974 * @param iCrReg The control register number (destination).
15975 * @param iGReg The general purpose register number (source).
15976 *
15977 * @remarks In ring-0 not all of the state needs to be synced in.
15978 */
15979VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15980{
15981 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15982 Assert(iCrReg < 16);
15983 Assert(iGReg < 16);
15984
15985 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15986 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15987 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15988}
15989
15990
15991/**
15992 * Interface for HM and EM to read from a CRx register.
15993 *
15994 * @returns Strict VBox status code.
15995 * @param pVCpu The cross context virtual CPU structure.
15996 * @param cbInstr The instruction length in bytes.
15997 * @param iGReg The general purpose register number (destination).
15998 * @param iCrReg The control register number (source).
15999 *
16000 * @remarks In ring-0 not all of the state needs to be synced in.
16001 */
16002VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
16003{
16004 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
16005 Assert(iCrReg < 16);
16006 Assert(iGReg < 16);
16007
16008 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16009 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
16010 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16011}
16012
16013
16014/**
16015 * Interface for HM and EM to clear the CR0[TS] bit.
16016 *
16017 * @returns Strict VBox status code.
16018 * @param pVCpu The cross context virtual CPU structure.
16019 * @param cbInstr The instruction length in bytes.
16020 *
16021 * @remarks In ring-0 not all of the state needs to be synced in.
16022 */
16023VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
16024{
16025 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
16026
16027 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16028 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
16029 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16030}
16031
16032
16033/**
16034 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
16035 *
16036 * @returns Strict VBox status code.
16037 * @param pVCpu The cross context virtual CPU structure.
16038 * @param cbInstr The instruction length in bytes.
16039 * @param uValue The value to load into CR0.
16040 *
16041 * @remarks In ring-0 not all of the state needs to be synced in.
16042 */
16043VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
16044{
16045 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16046
16047 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16048 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
16049 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16050}
16051
16052
16053/**
16054 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
16055 *
16056 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
16057 *
16058 * @returns Strict VBox status code.
16059 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16060 * @param cbInstr The instruction length in bytes.
16061 * @remarks In ring-0 not all of the state needs to be synced in.
16062 * @thread EMT(pVCpu)
16063 */
16064VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
16065{
16066 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16067
16068 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16069 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
16070 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16071}
16072
16073
16074/**
16075 * Interface for HM and EM to emulate the INVLPG instruction.
16076 *
16077 * @param pVCpu The cross context virtual CPU structure.
16078 * @param cbInstr The instruction length in bytes.
16079 * @param GCPtrPage The effective address of the page to invalidate.
16080 *
16081 * @remarks In ring-0 not all of the state needs to be synced in.
16082 */
16083VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
16084{
16085 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16086
16087 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16088 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
16089 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16090}
16091
16092
16093/**
16094 * Interface for HM and EM to emulate the INVPCID instruction.
16095 *
16096 * @param pVCpu The cross context virtual CPU structure.
16097 * @param cbInstr The instruction length in bytes.
16098 * @param uType The invalidation type.
16099 * @param GCPtrInvpcidDesc The effective address of the INVPCID descriptor.
16100 *
16101 * @remarks In ring-0 not all of the state needs to be synced in.
16102 */
16103VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPU pVCpu, uint8_t cbInstr, uint8_t uType, RTGCPTR GCPtrInvpcidDesc)
16104{
16105 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
16106
16107 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16108 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_invpcid, uType, GCPtrInvpcidDesc);
16109 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16110}
16111
16112
16113/**
16114 * Checks if IEM is in the process of delivering an event (interrupt or
16115 * exception).
16116 *
16117 * @returns true if we're in the process of raising an interrupt or exception,
16118 * false otherwise.
16119 * @param pVCpu The cross context virtual CPU structure.
16120 * @param puVector Where to store the vector associated with the
16121 * currently delivered event, optional.
16122 * @param pfFlags Where to store the event delivery flags (see
16123 * IEM_XCPT_FLAGS_XXX), optional.
16124 * @param puErr Where to store the error code associated with the
16125 * event, optional.
16126 * @param puCr2 Where to store the CR2 associated with the event,
16127 * optional.
16128 * @remarks The caller should check the flags to determine if the error code and
16129 * CR2 are valid for the event.
16130 */
16131VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
16132{
16133 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
16134 if (fRaisingXcpt)
16135 {
16136 if (puVector)
16137 *puVector = pVCpu->iem.s.uCurXcpt;
16138 if (pfFlags)
16139 *pfFlags = pVCpu->iem.s.fCurXcpt;
16140 if (puErr)
16141 *puErr = pVCpu->iem.s.uCurXcptErr;
16142 if (puCr2)
16143 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
16144 }
16145 return fRaisingXcpt;
16146}
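
#if 0 /* Illustrative sketch, not part of the original file: the intended query
         pattern (iemSketchLogCurrentXcpt is hypothetical).  Per the remarks above,
         the IEM_XCPT_FLAGS_XXX bits are checked before trusting uErr and uCr2. */
static void iemSketchLogCurrentXcpt(PVMCPU pVCpu)
{
    uint8_t  uVector;
    uint32_t fFlags;
    uint32_t uErr;
    uint64_t uCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
    {
        Log(("Delivering vector %#x (fFlags=%#x)\n", uVector, fFlags));
        if (fFlags & IEM_XCPT_FLAGS_ERR)
            Log(("  error code %#x\n", uErr));
        if (fFlags & IEM_XCPT_FLAGS_CR2)
            Log(("  CR2 %#RX64\n", uCr2));
    }
}
#endif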
16147
16148#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
16149/**
16150 * Interface for HM and EM to emulate the CLGI instruction.
16151 *
16152 * @returns Strict VBox status code.
16153 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16154 * @param cbInstr The instruction length in bytes.
16155 * @thread EMT(pVCpu)
16156 */
16157VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
16158{
16159 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16160
16161 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16162 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
16163 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16164}
16165
16166
16167/**
16168 * Interface for HM and EM to emulate the STGI instruction.
16169 *
16170 * @returns Strict VBox status code.
16171 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16172 * @param cbInstr The instruction length in bytes.
16173 * @thread EMT(pVCpu)
16174 */
16175VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
16176{
16177 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16178
16179 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16180 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
16181 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16182}
16183
16184
16185/**
16186 * Interface for HM and EM to emulate the VMLOAD instruction.
16187 *
16188 * @returns Strict VBox status code.
16189 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16190 * @param cbInstr The instruction length in bytes.
16191 * @thread EMT(pVCpu)
16192 */
16193VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
16194{
16195 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16196
16197 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16198 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
16199 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16200}
16201
16202
16203/**
16204 * Interface for HM and EM to emulate the VMSAVE instruction.
16205 *
16206 * @returns Strict VBox status code.
16207 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16208 * @param cbInstr The instruction length in bytes.
16209 * @thread EMT(pVCpu)
16210 */
16211VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
16212{
16213 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16214
16215 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16216 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
16217 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16218}
16219
16220
16221/**
16222 * Interface for HM and EM to emulate the INVLPGA instruction.
16223 *
16224 * @returns Strict VBox status code.
16225 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16226 * @param cbInstr The instruction length in bytes.
16227 * @thread EMT(pVCpu)
16228 */
16229VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
16230{
16231 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16232
16233 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16234 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
16235 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16236}
16237
16238
16239/**
16240 * Interface for HM and EM to emulate the VMRUN instruction.
16241 *
16242 * @returns Strict VBox status code.
16243 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16244 * @param cbInstr The instruction length in bytes.
16245 * @thread EMT(pVCpu)
16246 */
16247VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
16248{
16249 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16250
16251 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16252 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
16253 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16254}
16255
16256
16257/**
16258 * Interface for HM and EM to emulate \#VMEXIT.
16259 *
16260 * @returns Strict VBox status code.
16261 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16262 * @param uExitCode The exit code.
16263 * @param uExitInfo1 The exit info. 1 field.
16264 * @param uExitInfo2 The exit info. 2 field.
16265 * @thread EMT(pVCpu)
16266 */
16267VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
16268{
16269 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, IEM_GET_CTX(pVCpu), uExitCode, uExitInfo1, uExitInfo2);
16270 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16271}
16272#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
16273
16274#ifdef IN_RING3
16275
16276/**
16277 * Handles the unlikely and probably fatal merge cases.
16278 *
16279 * @returns Merged status code.
16280 * @param rcStrict Current EM status code.
16281 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16282 * with @a rcStrict.
16283 * @param iMemMap The memory mapping index. For error reporting only.
16284 * @param pVCpu The cross context virtual CPU structure of the calling
16285 * thread, for error reporting only.
16286 */
16287DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
16288 unsigned iMemMap, PVMCPU pVCpu)
16289{
16290 if (RT_FAILURE_NP(rcStrict))
16291 return rcStrict;
16292
16293 if (RT_FAILURE_NP(rcStrictCommit))
16294 return rcStrictCommit;
16295
16296 if (rcStrict == rcStrictCommit)
16297 return rcStrictCommit;
16298
16299 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
16300 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
16301 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
16302 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
16303 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
16304 return VERR_IOM_FF_STATUS_IPE;
16305}
16306
16307
16308/**
16309 * Helper for IOMR3ProcessForceFlag.
16310 *
16311 * @returns Merged status code.
16312 * @param rcStrict Current EM status code.
16313 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16314 * with @a rcStrict.
16315 * @param iMemMap The memory mapping index. For error reporting only.
16316 * @param pVCpu The cross context virtual CPU structure of the calling
16317 * thread, for error reporting only.
16318 */
16319DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
16320{
16321 /* Simple. */
16322 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
16323 return rcStrictCommit;
16324
16325 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
16326 return rcStrict;
16327
16328 /* EM scheduling status codes. */
16329 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
16330 && rcStrict <= VINF_EM_LAST))
16331 {
16332 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
16333 && rcStrictCommit <= VINF_EM_LAST))
16334 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
16335 }
16336
16337 /* Unlikely */
16338 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
16339}
16340
16341
16342/**
16343 * Called by force-flag handling code when VMCPU_FF_IEM is set.
16344 *
16345 * @returns Merge between @a rcStrict and what the commit operation returned.
16346 * @param pVM The cross context VM structure.
16347 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16348 * @param rcStrict The status code returned by ring-0 or raw-mode.
16349 */
16350VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
16351{
16352 /*
16353 * Reset the pending commit.
16354 */
16355 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
16356 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
16357 ("%#x %#x %#x\n",
16358 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16359 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
16360
16361 /*
16362 * Commit the pending bounce buffers (usually just one).
16363 */
16364 unsigned cBufs = 0;
16365 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
16366 while (iMemMap-- > 0)
16367 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
16368 {
16369 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
16370 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
16371 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
16372
16373 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
16374 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
16375 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
16376
16377 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
16378 {
16379 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
16380 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16381 pbBuf,
16382 cbFirst,
16383 PGMACCESSORIGIN_IEM);
16384 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16385 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16386 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16387 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16388 }
16389
16390 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16391 {
16392 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16393 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16394 pbBuf + cbFirst,
16395 cbSecond,
16396 PGMACCESSORIGIN_IEM);
16397 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16398 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16399 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16400 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16401 }
16402 cBufs++;
16403 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16404 }
16405
16406 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16407 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16408 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16409 pVCpu->iem.s.cActiveMappings = 0;
16410 return rcStrict;
16411}
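
#if 0 /* Illustrative sketch, not part of the original file: rough shape of the
         ring-3 caller that picks up pending bounce-buffer commits
         (iemSketchProcessPendingCommits is hypothetical). */
static VBOXSTRICTRC iemSketchProcessPendingCommits(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif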
16412
16413#endif /* IN_RING3 */
16414