VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@66986

Last change on this file since 66986 was 66986, checked in by vboxsync, 8 years ago

VMM/IEM: Added an assertion to IEMEvaluateRecursiveXcpt.

1/* $Id: IEMAll.cpp 66986 2017-05-19 14:21:55Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
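/* Editor's illustrative sketch (not part of the original source): how the
 * logging levels listed above are typically exercised from IEM code once
 * LOG_GROUP is set to LOG_GROUP_IEM (done further down in this file). The
 * message texts and variable names are made up for illustration.
 *
 *     Log(("iemRaiseXcptOrInt: invalid IDT entry -> #GP\n"));              // level 1: major events
 *     LogFlow(("IEMExecOne: returns %Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); // flow: enter/exit info
 *     Log4(("decode - %04x:%08RX64 xor eax, eax\n", uCsSel, uRip));        // level 4: mnemonics w/ EIP
 */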
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#ifdef VBOX_WITH_NESTED_HWVIRT
105# include <VBox/vmm/hm_svm.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#ifdef VBOX_WITH_RAW_MODE_NOT_R0
111# include <VBox/vmm/patm.h>
112# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
113# include <VBox/vmm/csam.h>
114# endif
115#endif
116#include "IEMInternal.h"
117#ifdef IEM_VERIFICATION_MODE_FULL
118# include <VBox/vmm/rem.h>
119# include <VBox/vmm/mm.h>
120#endif
121#include <VBox/vmm/vm.h>
122#include <VBox/log.h>
123#include <VBox/err.h>
124#include <VBox/param.h>
125#include <VBox/dis.h>
126#include <VBox/disopcode.h>
127#include <iprt/assert.h>
128#include <iprt/string.h>
129#include <iprt/x86.h>
130
131
132/*********************************************************************************************************************************
133* Structures and Typedefs *
134*********************************************************************************************************************************/
135/** @typedef PFNIEMOP
136 * Pointer to an opcode decoder function.
137 */
138
139/** @def FNIEMOP_DEF
140 * Define an opcode decoder function.
141 *
142 * We're using macros for this so that adding and removing parameters as well as
143 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
144 *
145 * @param a_Name The function name.
146 */
147
148/** @typedef PFNIEMOPRM
149 * Pointer to an opcode decoder function with RM byte.
150 */
151
152/** @def FNIEMOPRM_DEF
153 * Define an opcode decoder function with RM byte.
154 *
155 * We're using macros for this so that adding and removing parameters as well as
156 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL_1
157 *
158 * @param a_Name The function name.
159 */
160
161#if defined(__GNUC__) && defined(RT_ARCH_X86)
162typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
163typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
164# define FNIEMOP_DEF(a_Name) \
165 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
166# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
167 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
168# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
169 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
170
171#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
172typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
173typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
174# define FNIEMOP_DEF(a_Name) \
175 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
176# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
177 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
178# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
179 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
180
181#elif defined(__GNUC__)
182typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
183typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
184# define FNIEMOP_DEF(a_Name) \
185 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
186# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
187 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
188# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
189 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
190
191#else
192typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
193typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
194# define FNIEMOP_DEF(a_Name) \
195 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
196# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
197 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
198# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
199 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
200
201#endif
202#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
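/* Editor's illustrative sketch (not part of the original source): how the
 * FNIEMOP_DEF / FNIEMOP_CALL macro pairs above fit together. The handler name
 * is hypothetical; the real decoders live in the instruction tables included
 * elsewhere.
 */
#if 0
FNIEMOP_DEF(iemOp_ExampleUd)
{
    /* An unsupported encoding would simply raise #UD: */
    return iemRaiseUndefinedOpcode(pVCpu);
}
/* ...and would be invoked from a dispatch table or prefix handler as:
 *     VBOXSTRICTRC rcStrict = FNIEMOP_CALL(iemOp_ExampleUd); */
#endif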
203
204
205/**
206 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
207 */
208typedef union IEMSELDESC
209{
210 /** The legacy view. */
211 X86DESC Legacy;
212 /** The long mode view. */
213 X86DESC64 Long;
214} IEMSELDESC;
215/** Pointer to a selector descriptor table entry. */
216typedef IEMSELDESC *PIEMSELDESC;
217
218/**
219 * CPU exception classes.
220 */
221typedef enum IEMXCPTCLASS
222{
223 IEMXCPTCLASS_BENIGN,
224 IEMXCPTCLASS_CONTRIBUTORY,
225 IEMXCPTCLASS_PAGE_FAULT
226} IEMXCPTCLASS;
227
228
229/*********************************************************************************************************************************
230* Defined Constants And Macros *
231*********************************************************************************************************************************/
232/** @def IEM_WITH_SETJMP
233 * Enables alternative status code handling using setjmps.
234 *
235 * This adds a bit of expense via the setjmp() call since it saves all the
236 * non-volatile registers. However, it eliminates return code checks and allows
237 * for more optimal return value passing (return regs instead of stack buffer).
238 */
239#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
240# define IEM_WITH_SETJMP
241#endif
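/* Editor's note (illustrative, not part of the original source): with
 * IEM_WITH_SETJMP defined, several raise/fetch helpers gain *Jmp siblings that
 * report failures via longjmp instead of returning a VBOXSTRICTRC, e.g. (see
 * the forward declarations further down):
 *     rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);  // status-code flavour
 *     iemRaiseGeneralProtectionFault0Jmp(pVCpu);          // longjmp flavour, does not return
 */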
242
243/** Temporary hack to disable the double execution. Will be removed in favor
244 * of a dedicated execution mode in EM. */
245//#define IEM_VERIFICATION_MODE_NO_REM
246
247/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
248 * due to GCC lacking knowledge about the value range of a switch. */
249#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
250
251/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
252#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
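/* Editor's usage sketch (illustrative, not part of the original source): the
 * macros above close an exhaustive switch so GCC cannot warn about falling off
 * the end, e.g.:
 *     switch (pVCpu->iem.s.enmEffOpSize)
 *     {
 *         case IEMMODE_16BIT: cbValue = 2; break;
 *         case IEMMODE_32BIT: cbValue = 4; break;
 *         case IEMMODE_64BIT: cbValue = 8; break;
 *         IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *     }
 */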
253
254/**
255 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
256 * occasion.
257 */
258#ifdef LOG_ENABLED
259# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
260 do { \
261 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
262 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
263 } while (0)
264#else
265# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
266 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
267#endif
268
269/**
270 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
271 * occasion using the supplied logger statement.
272 *
273 * @param a_LoggerArgs What to log on failure.
274 */
275#ifdef LOG_ENABLED
276# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
277 do { \
278 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
279 /*LogFunc(a_LoggerArgs);*/ \
280 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
281 } while (0)
282#else
283# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
284 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
285#endif
286
287/**
288 * Call an opcode decoder function.
289 *
290 * We're using macros for this so that adding and removing parameters can be
291 * done as we please. See FNIEMOP_DEF.
292 */
293#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
294
295/**
296 * Call a common opcode decoder function taking one extra argument.
297 *
298 * We're using macros for this so that adding and removing parameters can be
299 * done as we please. See FNIEMOP_DEF_1.
300 */
301#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
302
303/**
304 * Call a common opcode decoder function taking two extra arguments.
305 *
306 * We're using macros for this so that adding and removing parameters can be
307 * done as we please. See FNIEMOP_DEF_2.
308 */
309#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
310
311/**
312 * Check if we're currently executing in real or virtual 8086 mode.
313 *
314 * @returns @c true if it is, @c false if not.
315 * @param a_pVCpu The IEM state of the current CPU.
316 */
317#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
318
319/**
320 * Check if we're currently executing in virtual 8086 mode.
321 *
322 * @returns @c true if it is, @c false if not.
323 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
324 */
325#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
326
327/**
328 * Check if we're currently executing in long mode.
329 *
330 * @returns @c true if it is, @c false if not.
331 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
332 */
333#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
334
335/**
336 * Check if we're currently executing in real mode.
337 *
338 * @returns @c true if it is, @c false if not.
339 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
340 */
341#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
342
343/**
344 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
345 * @returns PCCPUMFEATURES
346 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
347 */
348#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
349
350/**
351 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
352 * @returns PCCPUMFEATURES
353 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
354 */
355#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
356
357/**
358 * Evaluates to true if we're presenting an Intel CPU to the guest.
359 */
360#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
361
362/**
363 * Evaluates to true if we're presenting an AMD CPU to the guest.
364 */
365#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
366
367/**
368 * Check if the address is canonical.
369 */
370#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
371
372/**
373 * Gets the effective VEX.VVVV value.
374 *
375 * The 4th bit is ignored if not 64-bit code.
376 * @returns effective V-register value.
377 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
378 */
379#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
380 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
381
382/** @def IEM_USE_UNALIGNED_DATA_ACCESS
383 * Use unaligned accesses instead of elaborate byte assembly. */
384#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
385# define IEM_USE_UNALIGNED_DATA_ACCESS
386#endif
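/* Editor's sketch (illustrative, not part of the original source): what the
 * define above selects between when loading a 16-bit value from a little-endian
 * byte buffer pb:
 *     #ifdef IEM_USE_UNALIGNED_DATA_ACCESS
 *         uint16_t u16 = *(uint16_t const *)pb;         // plain unaligned load
 *     #else
 *         uint16_t u16 = RT_MAKE_U16(pb[0], pb[1]);     // byte-wise assembly
 *     #endif
 */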
387
388#ifdef VBOX_WITH_NESTED_HWVIRT
389/**
390 * Check the common SVM instruction preconditions.
391 */
392# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
393 do { \
394 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
395 { \
396 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
397 return iemRaiseUndefinedOpcode(pVCpu); \
398 } \
399 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
400 { \
401 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
402 return iemRaiseUndefinedOpcode(pVCpu); \
403 } \
404 if (pVCpu->iem.s.uCpl != 0) \
405 { \
406 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
407 return iemRaiseGeneralProtectionFault0(pVCpu); \
408 } \
409 } while (0)
410
411/**
412 * Check if SVM is enabled.
413 */
414# define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
415
416/**
417 * Check if an SVM control/instruction intercept is set.
418 */
419# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(IEM_GET_CTX(a_pVCpu), (a_Intercept)))
420
421/**
422 * Check if an SVM read CRx intercept is set.
423 */
424# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
425
426/**
427 * Check if an SVM write CRx intercept is set.
428 */
429# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
430
431/**
432 * Check if an SVM read DRx intercept is set.
433 */
434# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
435
436/**
437 * Check if an SVM write DRx intercept is set.
438 */
439# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
440
441/**
442 * Check if an SVM exception intercept is set.
443 */
444# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (CPUMIsGuestSvmXcptInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uVector)))
445
446/**
447 * Invokes the SVM \#VMEXIT handler for the nested-guest.
448 */
449# define IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
450 do \
451 { \
452 VBOXSTRICTRC rcStrictVmExit = HMSvmNstGstVmExit((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_uExitCode), (a_uExitInfo1), \
453 (a_uExitInfo2)); \
454 return rcStrictVmExit == VINF_SVM_VMEXIT ? VINF_SUCCESS : rcStrictVmExit; \
455 } while (0)
456
457/**
458 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
459 * corresponding decode assist information.
460 */
461# define IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
462 do \
463 { \
464 uint64_t uExitInfo1; \
465 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssist \
466 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
467 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
468 else \
469 uExitInfo1 = 0; \
470 IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
471 } while (0)
472
473/**
474 * Checks and handles an SVM MSR intercept.
475 */
476# define IEM_SVM_NST_GST_MSR_INTERCEPT(a_pVCpu, a_idMsr, a_fWrite) \
477 HMSvmNstGstHandleMsrIntercept((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_idMsr), (a_fWrite))
478
479#else
480# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) do { } while (0)
481# define IEM_IS_SVM_ENABLED(a_pVCpu) (false)
482# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
483# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
484# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
485# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
486# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
487# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
488# define IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
489# define IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
490# define IEM_SVM_NST_GST_MSR_INTERCEPT(a_pVCpu, a_idMsr, a_fWrite) (VERR_SVM_IPE_1)
491
492#endif /* VBOX_WITH_NESTED_HWVIRT */
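/* Editor's usage sketch (illustrative, not part of the original source): an
 * SVM instruction implementation would typically open with the common checks
 * and an intercept test along these lines (intercept/exit-code names assumed
 * from the SVM headers):
 *     IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload);
 *     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
 *         IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMLOAD, 0, 0);
 */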
493
494
495/*********************************************************************************************************************************
496* Global Variables *
497*********************************************************************************************************************************/
498extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
499
500
501/** Function table for the ADD instruction. */
502IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
503{
504 iemAImpl_add_u8, iemAImpl_add_u8_locked,
505 iemAImpl_add_u16, iemAImpl_add_u16_locked,
506 iemAImpl_add_u32, iemAImpl_add_u32_locked,
507 iemAImpl_add_u64, iemAImpl_add_u64_locked
508};
509
510/** Function table for the ADC instruction. */
511IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
512{
513 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
514 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
515 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
516 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
517};
518
519/** Function table for the SUB instruction. */
520IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
521{
522 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
523 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
524 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
525 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
526};
527
528/** Function table for the SBB instruction. */
529IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
530{
531 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
532 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
533 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
534 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
535};
536
537/** Function table for the OR instruction. */
538IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
539{
540 iemAImpl_or_u8, iemAImpl_or_u8_locked,
541 iemAImpl_or_u16, iemAImpl_or_u16_locked,
542 iemAImpl_or_u32, iemAImpl_or_u32_locked,
543 iemAImpl_or_u64, iemAImpl_or_u64_locked
544};
545
546/** Function table for the XOR instruction. */
547IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
548{
549 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
550 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
551 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
552 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
553};
554
555/** Function table for the AND instruction. */
556IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
557{
558 iemAImpl_and_u8, iemAImpl_and_u8_locked,
559 iemAImpl_and_u16, iemAImpl_and_u16_locked,
560 iemAImpl_and_u32, iemAImpl_and_u32_locked,
561 iemAImpl_and_u64, iemAImpl_and_u64_locked
562};
563
564/** Function table for the CMP instruction.
565 * @remarks Making operand order ASSUMPTIONS.
566 */
567IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
568{
569 iemAImpl_cmp_u8, NULL,
570 iemAImpl_cmp_u16, NULL,
571 iemAImpl_cmp_u32, NULL,
572 iemAImpl_cmp_u64, NULL
573};
574
575/** Function table for the TEST instruction.
576 * @remarks Making operand order ASSUMPTIONS.
577 */
578IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
579{
580 iemAImpl_test_u8, NULL,
581 iemAImpl_test_u16, NULL,
582 iemAImpl_test_u32, NULL,
583 iemAImpl_test_u64, NULL
584};
585
586/** Function table for the BT instruction. */
587IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
588{
589 NULL, NULL,
590 iemAImpl_bt_u16, NULL,
591 iemAImpl_bt_u32, NULL,
592 iemAImpl_bt_u64, NULL
593};
594
595/** Function table for the BTC instruction. */
596IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
597{
598 NULL, NULL,
599 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
600 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
601 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
602};
603
604/** Function table for the BTR instruction. */
605IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
606{
607 NULL, NULL,
608 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
609 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
610 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
611};
612
613/** Function table for the BTS instruction. */
614IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
615{
616 NULL, NULL,
617 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
618 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
619 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
620};
621
622/** Function table for the BSF instruction. */
623IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
624{
625 NULL, NULL,
626 iemAImpl_bsf_u16, NULL,
627 iemAImpl_bsf_u32, NULL,
628 iemAImpl_bsf_u64, NULL
629};
630
631/** Function table for the BSR instruction. */
632IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
633{
634 NULL, NULL,
635 iemAImpl_bsr_u16, NULL,
636 iemAImpl_bsr_u32, NULL,
637 iemAImpl_bsr_u64, NULL
638};
639
640/** Function table for the IMUL instruction. */
641IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
642{
643 NULL, NULL,
644 iemAImpl_imul_two_u16, NULL,
645 iemAImpl_imul_two_u32, NULL,
646 iemAImpl_imul_two_u64, NULL
647};
648
649/** Group 1 /r lookup table. */
650IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
651{
652 &g_iemAImpl_add,
653 &g_iemAImpl_or,
654 &g_iemAImpl_adc,
655 &g_iemAImpl_sbb,
656 &g_iemAImpl_and,
657 &g_iemAImpl_sub,
658 &g_iemAImpl_xor,
659 &g_iemAImpl_cmp
660};
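/* Editor's dispatch sketch (illustrative, not part of the original source):
 * the group 1 table above is indexed by the ModR/M reg field, so a decoder for
 * the 0x80..0x83 opcodes can pick its worker roughly like this:
 *     PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
 */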
661
662/** Function table for the INC instruction. */
663IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
664{
665 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
666 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
667 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
668 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
669};
670
671/** Function table for the DEC instruction. */
672IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
673{
674 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
675 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
676 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
677 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
678};
679
680/** Function table for the NEG instruction. */
681IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
682{
683 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
684 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
685 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
686 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
687};
688
689/** Function table for the NOT instruction. */
690IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
691{
692 iemAImpl_not_u8, iemAImpl_not_u8_locked,
693 iemAImpl_not_u16, iemAImpl_not_u16_locked,
694 iemAImpl_not_u32, iemAImpl_not_u32_locked,
695 iemAImpl_not_u64, iemAImpl_not_u64_locked
696};
697
698
699/** Function table for the ROL instruction. */
700IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
701{
702 iemAImpl_rol_u8,
703 iemAImpl_rol_u16,
704 iemAImpl_rol_u32,
705 iemAImpl_rol_u64
706};
707
708/** Function table for the ROR instruction. */
709IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
710{
711 iemAImpl_ror_u8,
712 iemAImpl_ror_u16,
713 iemAImpl_ror_u32,
714 iemAImpl_ror_u64
715};
716
717/** Function table for the RCL instruction. */
718IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
719{
720 iemAImpl_rcl_u8,
721 iemAImpl_rcl_u16,
722 iemAImpl_rcl_u32,
723 iemAImpl_rcl_u64
724};
725
726/** Function table for the RCR instruction. */
727IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
728{
729 iemAImpl_rcr_u8,
730 iemAImpl_rcr_u16,
731 iemAImpl_rcr_u32,
732 iemAImpl_rcr_u64
733};
734
735/** Function table for the SHL instruction. */
736IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
737{
738 iemAImpl_shl_u8,
739 iemAImpl_shl_u16,
740 iemAImpl_shl_u32,
741 iemAImpl_shl_u64
742};
743
744/** Function table for the SHR instruction. */
745IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
746{
747 iemAImpl_shr_u8,
748 iemAImpl_shr_u16,
749 iemAImpl_shr_u32,
750 iemAImpl_shr_u64
751};
752
753/** Function table for the SAR instruction. */
754IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
755{
756 iemAImpl_sar_u8,
757 iemAImpl_sar_u16,
758 iemAImpl_sar_u32,
759 iemAImpl_sar_u64
760};
761
762
763/** Function table for the MUL instruction. */
764IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
765{
766 iemAImpl_mul_u8,
767 iemAImpl_mul_u16,
768 iemAImpl_mul_u32,
769 iemAImpl_mul_u64
770};
771
772/** Function table for the IMUL instruction working implicitly on rAX. */
773IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
774{
775 iemAImpl_imul_u8,
776 iemAImpl_imul_u16,
777 iemAImpl_imul_u32,
778 iemAImpl_imul_u64
779};
780
781/** Function table for the DIV instruction. */
782IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
783{
784 iemAImpl_div_u8,
785 iemAImpl_div_u16,
786 iemAImpl_div_u32,
787 iemAImpl_div_u64
788};
789
790/** Function table for the IDIV instruction. */
791IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
792{
793 iemAImpl_idiv_u8,
794 iemAImpl_idiv_u16,
795 iemAImpl_idiv_u32,
796 iemAImpl_idiv_u64
797};
798
799/** Function table for the SHLD instruction */
800IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
801{
802 iemAImpl_shld_u16,
803 iemAImpl_shld_u32,
804 iemAImpl_shld_u64,
805};
806
807/** Function table for the SHRD instruction */
808IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
809{
810 iemAImpl_shrd_u16,
811 iemAImpl_shrd_u32,
812 iemAImpl_shrd_u64,
813};
814
815
816/** Function table for the PUNPCKLBW instruction */
817IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
818/** Function table for the PUNPCKLWD instruction */
819IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
820/** Function table for the PUNPCKLDQ instruction */
821IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
822/** Function table for the PUNPCKLQDQ instruction */
823IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
824
825/** Function table for the PUNPCKHBW instruction */
826IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
827/** Function table for the PUNPCKHWD instruction */
828IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
829/** Function table for the PUNPCKHDQ instruction */
830IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
831/** Function table for the PUNPCKHQDQ instruction */
832IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
833
834/** Function table for the PXOR instruction */
835IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
836/** Function table for the PCMPEQB instruction */
837IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
838/** Function table for the PCMPEQW instruction */
839IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
840/** Function table for the PCMPEQD instruction */
841IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
842
843
844#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
845/** What IEM just wrote. */
846uint8_t g_abIemWrote[256];
847/** How much IEM just wrote. */
848size_t g_cbIemWrote;
849#endif
850
851
852/*********************************************************************************************************************************
853* Internal Functions *
854*********************************************************************************************************************************/
855IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
856IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
857IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
858IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
859/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
860IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
861IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
862IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
863IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
864IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
865IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
866IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
867IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
868IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
869IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
870IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
871IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
872#ifdef IEM_WITH_SETJMP
873DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
874DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
875DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
876DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
877DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
878#endif
879
880IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
881IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
882IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
883IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
884IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
885IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
886IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
887IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
888IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
889IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
890IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
891IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
892IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
893IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
894IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
895IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
896
897#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
898IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
899#endif
900IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
901IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
902
903#ifdef VBOX_WITH_NESTED_HWVIRT
904/**
905 * Checks if the intercepted IO instruction causes a \#VMEXIT and handles it
906 * accordingly.
907 *
908 * @returns VBox strict status code.
909 * @param pVCpu The cross context virtual CPU structure of the calling thread.
910 * @param u16Port The IO port being accessed.
911 * @param enmIoType The type of IO access.
912 * @param cbReg The IO operand size in bytes.
913 * @param cAddrSizeBits The address size in bits (16, 32 or 64).
914 * @param iEffSeg The effective segment number.
915 * @param fRep Whether this is a repeating IO instruction (REP prefix).
916 * @param fStrIo Whether this is a string IO instruction.
917 * @param cbInstr The length of the IO instruction in bytes.
918 *
919 * @remarks This must be called only when IO instructions are intercepted by the
920 * nested-guest hypervisor.
921 */
922IEM_STATIC VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPU pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
923 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr)
924{
925 Assert(IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT));
926 Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
927 Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);
928
929 static const uint32_t s_auIoOpSize[] = { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };
930 static const uint32_t s_auIoAddrSize[] = { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };
931
932 SVMIOIOEXITINFO IoExitInfo;
933 IoExitInfo.u = s_auIoOpSize[cbReg & 7];
934 IoExitInfo.u |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
935 IoExitInfo.n.u1STR = fStrIo;
936 IoExitInfo.n.u1REP = fRep;
937 IoExitInfo.n.u3SEG = iEffSeg & 0x7;
938 IoExitInfo.n.u1Type = enmIoType;
939 IoExitInfo.n.u16Port = u16Port;
940
941 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
942 return HMSvmNstGstHandleIOIntercept(pVCpu, pCtx, &IoExitInfo, pCtx->rip + cbInstr);
943}
944
945#else
946IEM_STATIC VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPU pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
947 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr)
948{
949 RT_NOREF9(pVCpu, u16Port, enmIoType, cbReg, cAddrSizeBits, iEffSeg, fRep, fStrIo, cbInstr);
950 return VERR_IEM_IPE_9;
951}
952#endif /* VBOX_WITH_NESTED_HWVIRT */
953
954
955/**
956 * Sets the pass up status.
957 *
958 * @returns VINF_SUCCESS.
959 * @param pVCpu The cross context virtual CPU structure of the
960 * calling thread.
961 * @param rcPassUp The pass up status. Must be informational.
962 * VINF_SUCCESS is not allowed.
963 */
964IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
965{
966 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
967
968 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
969 if (rcOldPassUp == VINF_SUCCESS)
970 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
971 /* If both are EM scheduling codes, use EM priority rules. */
972 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
973 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
974 {
975 if (rcPassUp < rcOldPassUp)
976 {
977 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
978 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
979 }
980 else
981 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
982 }
983 /* Override EM scheduling with specific status code. */
984 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
985 {
986 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
987 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
988 }
989 /* Don't override specific status code, first come first served. */
990 else
991 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
992 return VINF_SUCCESS;
993}
994
995
996/**
997 * Calculates the CPU mode.
998 *
999 * This is mainly for updating IEMCPU::enmCpuMode.
1000 *
1001 * @returns CPU mode.
1002 * @param pCtx The register context for the CPU.
1003 */
1004DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
1005{
1006 if (CPUMIsGuestIn64BitCodeEx(pCtx))
1007 return IEMMODE_64BIT;
1008 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1009 return IEMMODE_32BIT;
1010 return IEMMODE_16BIT;
1011}
1012
1013
1014/**
1015 * Initializes the execution state.
1016 *
1017 * @param pVCpu The cross context virtual CPU structure of the
1018 * calling thread.
1019 * @param fBypassHandlers Whether to bypass access handlers.
1020 *
1021 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1022 * side-effects in strict builds.
1023 */
1024DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
1025{
1026 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1027
1028 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1029
1030#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1031 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1032 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1033 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1034 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1035 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1036 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1037 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1038 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1039#endif
1040
1041#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1042 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1043#endif
1044 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1045 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
1046#ifdef VBOX_STRICT
1047 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1048 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1049 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1050 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1051 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1052 pVCpu->iem.s.uRexReg = 127;
1053 pVCpu->iem.s.uRexB = 127;
1054 pVCpu->iem.s.uRexIndex = 127;
1055 pVCpu->iem.s.iEffSeg = 127;
1056 pVCpu->iem.s.idxPrefix = 127;
1057 pVCpu->iem.s.uVex3rdReg = 127;
1058 pVCpu->iem.s.uVexLength = 127;
1059 pVCpu->iem.s.fEvexStuff = 127;
1060 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1061# ifdef IEM_WITH_CODE_TLB
1062 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1063 pVCpu->iem.s.pbInstrBuf = NULL;
1064 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1065 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1066 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1067 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1068# else
1069 pVCpu->iem.s.offOpcode = 127;
1070 pVCpu->iem.s.cbOpcode = 127;
1071# endif
1072#endif
1073
1074 pVCpu->iem.s.cActiveMappings = 0;
1075 pVCpu->iem.s.iNextMapping = 0;
1076 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1077 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1078#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1079 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1080 && pCtx->cs.u64Base == 0
1081 && pCtx->cs.u32Limit == UINT32_MAX
1082 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1083 if (!pVCpu->iem.s.fInPatchCode)
1084 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1085#endif
1086
1087#ifdef IEM_VERIFICATION_MODE_FULL
1088 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
1089 pVCpu->iem.s.fNoRem = true;
1090#endif
1091}
1092
1093
1094/**
1095 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1096 *
1097 * @param pVCpu The cross context virtual CPU structure of the
1098 * calling thread.
1099 */
1100DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1101{
1102 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1103#ifdef IEM_VERIFICATION_MODE_FULL
1104 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
1105#endif
1106#ifdef VBOX_STRICT
1107# ifdef IEM_WITH_CODE_TLB
1108 NOREF(pVCpu);
1109# else
1110 pVCpu->iem.s.cbOpcode = 0;
1111# endif
1112#else
1113 NOREF(pVCpu);
1114#endif
1115}
1116
1117
1118/**
1119 * Initializes the decoder state.
1120 *
1121 * iemReInitDecoder is mostly a copy of this function.
1122 *
1123 * @param pVCpu The cross context virtual CPU structure of the
1124 * calling thread.
1125 * @param fBypassHandlers Whether to bypass access handlers.
1126 */
1127DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1128{
1129 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1130
1131 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1132
1133#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1134 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1135 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1136 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1137 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1138 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1139 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1140 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1141 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1142#endif
1143
1144#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1145 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1146#endif
1147 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1148#ifdef IEM_VERIFICATION_MODE_FULL
1149 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1150 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1151#endif
1152 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1153 pVCpu->iem.s.enmCpuMode = enmMode;
1154 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1155 pVCpu->iem.s.enmEffAddrMode = enmMode;
1156 if (enmMode != IEMMODE_64BIT)
1157 {
1158 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1159 pVCpu->iem.s.enmEffOpSize = enmMode;
1160 }
1161 else
1162 {
1163 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1164 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1165 }
1166 pVCpu->iem.s.fPrefixes = 0;
1167 pVCpu->iem.s.uRexReg = 0;
1168 pVCpu->iem.s.uRexB = 0;
1169 pVCpu->iem.s.uRexIndex = 0;
1170 pVCpu->iem.s.idxPrefix = 0;
1171 pVCpu->iem.s.uVex3rdReg = 0;
1172 pVCpu->iem.s.uVexLength = 0;
1173 pVCpu->iem.s.fEvexStuff = 0;
1174 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1175#ifdef IEM_WITH_CODE_TLB
1176 pVCpu->iem.s.pbInstrBuf = NULL;
1177 pVCpu->iem.s.offInstrNextByte = 0;
1178 pVCpu->iem.s.offCurInstrStart = 0;
1179# ifdef VBOX_STRICT
1180 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1181 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1182 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1183# endif
1184#else
1185 pVCpu->iem.s.offOpcode = 0;
1186 pVCpu->iem.s.cbOpcode = 0;
1187#endif
1188 pVCpu->iem.s.cActiveMappings = 0;
1189 pVCpu->iem.s.iNextMapping = 0;
1190 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1191 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1192#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1193 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1194 && pCtx->cs.u64Base == 0
1195 && pCtx->cs.u32Limit == UINT32_MAX
1196 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1197 if (!pVCpu->iem.s.fInPatchCode)
1198 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1199#endif
1200
1201#ifdef DBGFTRACE_ENABLED
1202 switch (enmMode)
1203 {
1204 case IEMMODE_64BIT:
1205 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1206 break;
1207 case IEMMODE_32BIT:
1208 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1209 break;
1210 case IEMMODE_16BIT:
1211 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1212 break;
1213 }
1214#endif
1215}
1216
1217
1218/**
1219 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1220 *
1221 * This is mostly a copy of iemInitDecoder.
1222 *
1223 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1224 */
1225DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1226{
1227 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1228
1229 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1230
1231#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1232 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1233 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1234 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1235 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1236 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1237 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1238 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1239 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1240#endif
1241
1242 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1243#ifdef IEM_VERIFICATION_MODE_FULL
1244 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1245 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1246#endif
1247 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1248 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1249 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1250 pVCpu->iem.s.enmEffAddrMode = enmMode;
1251 if (enmMode != IEMMODE_64BIT)
1252 {
1253 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1254 pVCpu->iem.s.enmEffOpSize = enmMode;
1255 }
1256 else
1257 {
1258 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1259 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1260 }
1261 pVCpu->iem.s.fPrefixes = 0;
1262 pVCpu->iem.s.uRexReg = 0;
1263 pVCpu->iem.s.uRexB = 0;
1264 pVCpu->iem.s.uRexIndex = 0;
1265 pVCpu->iem.s.idxPrefix = 0;
1266 pVCpu->iem.s.uVex3rdReg = 0;
1267 pVCpu->iem.s.uVexLength = 0;
1268 pVCpu->iem.s.fEvexStuff = 0;
1269 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1270#ifdef IEM_WITH_CODE_TLB
1271 if (pVCpu->iem.s.pbInstrBuf)
1272 {
1273 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1274 - pVCpu->iem.s.uInstrBufPc;
1275 if (off < pVCpu->iem.s.cbInstrBufTotal)
1276 {
1277 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1278 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1279 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1280 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1281 else
1282 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1283 }
1284 else
1285 {
1286 pVCpu->iem.s.pbInstrBuf = NULL;
1287 pVCpu->iem.s.offInstrNextByte = 0;
1288 pVCpu->iem.s.offCurInstrStart = 0;
1289 pVCpu->iem.s.cbInstrBuf = 0;
1290 pVCpu->iem.s.cbInstrBufTotal = 0;
1291 }
1292 }
1293 else
1294 {
1295 pVCpu->iem.s.offInstrNextByte = 0;
1296 pVCpu->iem.s.offCurInstrStart = 0;
1297 pVCpu->iem.s.cbInstrBuf = 0;
1298 pVCpu->iem.s.cbInstrBufTotal = 0;
1299 }
1300#else
1301 pVCpu->iem.s.cbOpcode = 0;
1302 pVCpu->iem.s.offOpcode = 0;
1303#endif
1304 Assert(pVCpu->iem.s.cActiveMappings == 0);
1305 pVCpu->iem.s.iNextMapping = 0;
1306 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1307 Assert(pVCpu->iem.s.fBypassHandlers == false);
1308#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1309 if (!pVCpu->iem.s.fInPatchCode)
1310 { /* likely */ }
1311 else
1312 {
1313 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1314 && pCtx->cs.u64Base == 0
1315 && pCtx->cs.u32Limit == UINT32_MAX
1316 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1317 if (!pVCpu->iem.s.fInPatchCode)
1318 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1319 }
1320#endif
1321
1322#ifdef DBGFTRACE_ENABLED
1323 switch (enmMode)
1324 {
1325 case IEMMODE_64BIT:
1326 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1327 break;
1328 case IEMMODE_32BIT:
1329 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1330 break;
1331 case IEMMODE_16BIT:
1332 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1333 break;
1334 }
1335#endif
1336}
1337
1338
1339
1340/**
1341 * Prefetches the opcodes the first time execution is started.
1342 *
1343 * @returns Strict VBox status code.
1344 * @param pVCpu The cross context virtual CPU structure of the
1345 * calling thread.
1346 * @param fBypassHandlers Whether to bypass access handlers.
1347 */
1348IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1349{
1350#ifdef IEM_VERIFICATION_MODE_FULL
1351 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1352#endif
1353 iemInitDecoder(pVCpu, fBypassHandlers);
1354
1355#ifdef IEM_WITH_CODE_TLB
1356 /** @todo Do ITLB lookup here. */
1357
1358#else /* !IEM_WITH_CODE_TLB */
1359
1360 /*
1361 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1362 *
1363 * First translate CS:rIP to a physical address.
1364 */
1365 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1366 uint32_t cbToTryRead;
1367 RTGCPTR GCPtrPC;
1368 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1369 {
1370 cbToTryRead = PAGE_SIZE;
1371 GCPtrPC = pCtx->rip;
1372 if (IEM_IS_CANONICAL(GCPtrPC))
1373 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1374 else
1375 return iemRaiseGeneralProtectionFault0(pVCpu);
1376 }
1377 else
1378 {
1379 uint32_t GCPtrPC32 = pCtx->eip;
1380 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1381 if (GCPtrPC32 <= pCtx->cs.u32Limit)
1382 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1383 else
1384 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1385 if (cbToTryRead) { /* likely */ }
1386 else /* overflowed */
1387 {
1388 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1389 cbToTryRead = UINT32_MAX;
1390 }
1391 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1392 Assert(GCPtrPC <= UINT32_MAX);
1393 }
1394
1395# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1396 /* Allow interpretation of patch manager code blocks since they can for
1397 instance throw #PFs for perfectly good reasons. */
1398 if (pVCpu->iem.s.fInPatchCode)
1399 {
1400 size_t cbRead = 0;
1401 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1402 AssertRCReturn(rc, rc);
1403 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1404 return VINF_SUCCESS;
1405 }
1406# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1407
1408 RTGCPHYS GCPhys;
1409 uint64_t fFlags;
1410 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1411 if (RT_SUCCESS(rc)) { /* probable */ }
1412 else
1413 {
1414 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1415 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1416 }
1417 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1418 else
1419 {
1420 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1421 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1422 }
1423 if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1424 else
1425 {
1426 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1427 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1428 }
1429 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1430 /** @todo Check reserved bits and such stuff. PGM is better at doing
1431 * that, so do it when implementing the guest virtual address
1432 * TLB... */
1433
1434# ifdef IEM_VERIFICATION_MODE_FULL
1435 /*
1436 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1437 * instruction.
1438 */
1439 /** @todo optimize this differently by not using PGMPhysRead. */
1440 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1441 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1442 if ( offPrevOpcodes < cbOldOpcodes
1443 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1444 {
1445 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1446 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1447 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1448 pVCpu->iem.s.cbOpcode = cbNew;
1449 return VINF_SUCCESS;
1450 }
1451# endif
1452
1453 /*
1454 * Read the bytes at this address.
1455 */
1456 PVM pVM = pVCpu->CTX_SUFF(pVM);
1457# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1458 size_t cbActual;
1459 if ( PATMIsEnabled(pVM)
1460 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1461 {
1462 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1463 Assert(cbActual > 0);
1464 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1465 }
1466 else
1467# endif
1468 {
1469 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1470 if (cbToTryRead > cbLeftOnPage)
1471 cbToTryRead = cbLeftOnPage;
1472 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1473 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1474
1475 if (!pVCpu->iem.s.fBypassHandlers)
1476 {
1477 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1478 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1479 { /* likely */ }
1480 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1481 {
1482 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1483 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1484 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1485 }
1486 else
1487 {
1488 Log((RT_SUCCESS(rcStrict)
1489 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1490 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1491 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1492 return rcStrict;
1493 }
1494 }
1495 else
1496 {
1497 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1498 if (RT_SUCCESS(rc))
1499 { /* likely */ }
1500 else
1501 {
1502 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1503 GCPtrPC, GCPhys, cbToTryRead, rc));
1504 return rc;
1505 }
1506 }
1507 pVCpu->iem.s.cbOpcode = cbToTryRead;
1508 }
1509#endif /* !IEM_WITH_CODE_TLB */
1510 return VINF_SUCCESS;
1511}
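
/*
 * Worked example (not built): the 32-bit/16-bit bounds arithmetic used above when
 * working out how many opcode bytes may be prefetched.  The values and the helper
 * name are made up for illustration only.
 */
#if 0
static void iemExampleCsLimitArithmetic(void)
{
    /* CS.limit = 0x0000ffff, EIP = 0x0000ff00: 0x100 bytes left in the segment. */
    uint32_t const uCsLimit    = UINT32_C(0x0000ffff);
    uint32_t const GCPtrPC32   = UINT32_C(0x0000ff00);
    uint32_t       cbToTryRead = uCsLimit - GCPtrPC32 + 1;     /* = 0x100 */

    /* Wrap-around case: EIP = 0 with a flat 4 GiB limit overflows to 0, which the
       code above detects and turns into "no segment limit restriction". */
    uint32_t cbWrapped = UINT32_MAX - UINT32_C(0) + 1;         /* = 0 (unsigned wrap) */
    if (!cbWrapped)
        cbWrapped = UINT32_MAX;

    /* The actual read is then further clamped to the end of the current page and
       to sizeof(pVCpu->iem.s.abOpcode). */
    NOREF(cbToTryRead); NOREF(cbWrapped);
}
#endif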
1512
1513
1514/**
1515 * Invalidates the IEM TLBs.
1516 *
1517 * This is called internally as well as by PGM when moving GC mappings.
1518 *
1520 * @param pVCpu The cross context virtual CPU structure of the calling
1521 * thread.
1522 * @param fVmm Set when PGM calls us with a remapping.
1523 */
1524VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1525{
1526#ifdef IEM_WITH_CODE_TLB
1527 pVCpu->iem.s.cbInstrBufTotal = 0;
1528 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1529 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1530 { /* very likely */ }
1531 else
1532 {
1533 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1534 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1535 while (i-- > 0)
1536 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1537 }
1538#endif
1539
1540#ifdef IEM_WITH_DATA_TLB
1541 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1542 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1543 { /* very likely */ }
1544 else
1545 {
1546 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1547 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1548 while (i-- > 0)
1549 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1550 }
1551#endif
1552 NOREF(pVCpu); NOREF(fVmm);
1553}
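
/*
 * Illustrative sketch (not built): why bumping uTlbRevision above is enough to
 * invalidate every TLB entry.  Each entry's uTag was created by ORing the page
 * number with the revision that was current at fill time (see the lookup in
 * iemOpcodeFetchBytesJmp), so once the revision advances no stored tag can equal
 * a freshly computed one.  The helper name below is hypothetical.
 */
#if 0
static bool iemExampleCodeTlbEntryIsValid(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    /* Tag and direct-mapped index exactly as the real lookup computes them. */
    uint64_t const uTag  = (GCPtr >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
    PIEMTLBENTRY   pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];

    /* An entry filled before the last IEMTlbInvalidateAll call still carries the
       old revision in its tag, so this compare fails and the entry is refilled. */
    return pTlbe->uTag == uTag;
}
#endif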
1554
1555
1556/**
1557 * Invalidates a page in the TLBs.
1558 *
1559 * @param pVCpu The cross context virtual CPU structure of the calling
1560 * thread.
1561 * @param GCPtr The address of the page to invalidate.
1562 */
1563VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1564{
1565#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1566 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1567 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1568 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1569 uintptr_t idx = (uint8_t)GCPtr;
1570
1571# ifdef IEM_WITH_CODE_TLB
1572 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1573 {
1574 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1575 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1576 pVCpu->iem.s.cbInstrBufTotal = 0;
1577 }
1578# endif
1579
1580# ifdef IEM_WITH_DATA_TLB
1581 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1582 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1583# endif
1584#else
1585 NOREF(pVCpu); NOREF(GCPtr);
1586#endif
1587}
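
/*
 * Worked example (not built): which slot a given linear address maps to in the
 * 256-entry, direct-mapped TLBs used above.  The address is made up.
 */
#if 0
static void iemExampleTlbIndexing(void)
{
    uint64_t const GCPtr   = UINT64_C(0x00007ff612345000);     /* what IEMTlbInvalidatePage gets as RTGCPTR */
    uint64_t const uPageNo = GCPtr >> X86_PAGE_SHIFT;           /* = 0x00007ff612345 */
    uint8_t  const idx     = (uint8_t)uPageNo;                  /* = 0x45, the low 8 bits */

    /* IEMTlbInvalidatePage therefore only needs to look at aEntries[idx]; any other
       page that happens to share the index simply evicts this entry later on. */
    NOREF(idx);
}
#endif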
1588
1589
1590/**
1591 * Invalidates the host physical aspects of the IEM TLBs.
1592 *
1593 * This is called internally as well as by PGM when moving GC mappings.
1594 *
1595 * @param pVCpu The cross context virtual CPU structure of the calling
1596 * thread.
1597 */
1598VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1599{
1600#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1601 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1602
1603# ifdef IEM_WITH_CODE_TLB
1604 pVCpu->iem.s.cbInstrBufTotal = 0;
1605# endif
1606 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1607 if (uTlbPhysRev != 0)
1608 {
1609 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1610 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1611 }
1612 else
1613 {
1614 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1615 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1616
1617 unsigned i;
1618# ifdef IEM_WITH_CODE_TLB
1619 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1620 while (i-- > 0)
1621 {
1622 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1623 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1624 }
1625# endif
1626# ifdef IEM_WITH_DATA_TLB
1627 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1628 while (i-- > 0)
1629 {
1630 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1631 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1632 }
1633# endif
1634 }
1635#else
1636 NOREF(pVCpu);
1637#endif
1638}
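
/*
 * Illustrative sketch (not built): how the physical revision bumped above takes
 * effect lazily.  The helper name is hypothetical; the real check lives in
 * iemOpcodeFetchBytesJmp below.
 */
#if 0
static bool iemExampleTlbePhysInfoIsCurrent(PVMCPU pVCpu, PIEMTLBENTRY pTlbe)
{
    /* The physical revision is stored in the IEMTLBE_F_PHYS_REV bits of each entry;
       once uTlbPhysRev has moved on, this compare fails and the caller re-queries
       PGM for the ring-3 mapping and the PG_NO_READ/PG_NO_WRITE flags. */
    return (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev;
}
#endif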
1639
1640
1641/**
1642 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
1643 *
1644 * This is called internally as well as by PGM when moving GC mappings.
1645 *
1646 * @param pVM The cross context VM structure.
1647 *
1648 * @remarks Caller holds the PGM lock.
1649 */
1650VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1651{
1652 RT_NOREF_PV(pVM);
1653}
1654
1655#ifdef IEM_WITH_CODE_TLB
1656
1657/**
1658 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception and
1659 * longjmp'ing on failure.
1660 *
1661 * We end up here for a number of reasons:
1662 * - pbInstrBuf isn't yet initialized.
1663 * - Advancing beyond the buffer boundary (e.g. cross page).
1664 * - Advancing beyond the CS segment limit.
1665 * - Fetching from non-mappable page (e.g. MMIO).
1666 *
1667 * @param cbDst Number of bytes to read.
1668 * @param pvDst Where to return the bytes.
1669 * @param pVCpu The cross context virtual CPU structure of the
1670 * calling thread.
1671 *
1672 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1673 */
1674IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1675{
1676#ifdef IN_RING3
1677//__debugbreak();
1678 for (;;)
1679 {
1680 Assert(cbDst <= 8);
1681 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1682
1683 /*
1684 * We might have a partial buffer match, deal with that first to make the
1685 * rest simpler. This is the first part of the cross page/buffer case.
1686 */
1687 if (pVCpu->iem.s.pbInstrBuf != NULL)
1688 {
1689 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1690 {
1691 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1692 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1693 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1694
1695 cbDst -= cbCopy;
1696 pvDst = (uint8_t *)pvDst + cbCopy;
1697 offBuf += cbCopy;
1698 pVCpu->iem.s.offInstrNextByte += cbCopy;
1699 }
1700 }
1701
1702 /*
1703 * Check segment limit, figuring how much we're allowed to access at this point.
1704 *
1705 * We will fault immediately if RIP is past the segment limit / in non-canonical
1706 * territory. If we do continue, there are one or more bytes to read before we
1707 * end up in trouble and we need to do that first before faulting.
1708 */
1709 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1710 RTGCPTR GCPtrFirst;
1711 uint32_t cbMaxRead;
1712 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1713 {
1714 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1715 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1716 { /* likely */ }
1717 else
1718 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1719 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1720 }
1721 else
1722 {
1723 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1724 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1725 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1726 { /* likely */ }
1727 else
1728 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1729 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1730 if (cbMaxRead != 0)
1731 { /* likely */ }
1732 else
1733 {
1734 /* Overflowed because address is 0 and limit is max. */
1735 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1736 cbMaxRead = X86_PAGE_SIZE;
1737 }
1738 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1739 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1740 if (cbMaxRead2 < cbMaxRead)
1741 cbMaxRead = cbMaxRead2;
1742 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1743 }
1744
1745 /*
1746 * Get the TLB entry for this piece of code.
1747 */
1748 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1749 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1750 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1751 if (pTlbe->uTag == uTag)
1752 {
1753 /* likely when executing lots of code, otherwise unlikely */
1754# ifdef VBOX_WITH_STATISTICS
1755 pVCpu->iem.s.CodeTlb.cTlbHits++;
1756# endif
1757 }
1758 else
1759 {
1760 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1761# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1762 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1763 {
1764 pTlbe->uTag = uTag;
1765 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1766 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1767 pTlbe->GCPhys = NIL_RTGCPHYS;
1768 pTlbe->pbMappingR3 = NULL;
1769 }
1770 else
1771# endif
1772 {
1773 RTGCPHYS GCPhys;
1774 uint64_t fFlags;
1775 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1776 if (RT_FAILURE(rc))
1777 {
1778 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1779 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1780 }
1781
1782 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1783 pTlbe->uTag = uTag;
1784 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1785 pTlbe->GCPhys = GCPhys;
1786 pTlbe->pbMappingR3 = NULL;
1787 }
1788 }
1789
1790 /*
1791 * Check TLB page table level access flags.
1792 */
1793 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1794 {
1795 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1796 {
1797 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1798 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1799 }
1800 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1801 {
1802 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1803 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1804 }
1805 }
1806
1807# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1808 /*
1809 * Allow interpretation of patch manager code blocks since they can for
1810 * instance throw #PFs for perfectly good reasons.
1811 */
1812 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1813 { /* not unlikely */ }
1814 else
1815 {
1816 /** @todo This could be optimized a little in ring-3 if we liked. */
1817 size_t cbRead = 0;
1818 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1819 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1820 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1821 return;
1822 }
1823# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1824
1825 /*
1826 * Look up the physical page info if necessary.
1827 */
1828 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1829 { /* not necessary */ }
1830 else
1831 {
1832 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1833 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1834 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1835 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1836 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1837 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1838 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1839 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1840 }
1841
1842# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1843 /*
1844 * Try do a direct read using the pbMappingR3 pointer.
1845 */
1846 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1847 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1848 {
1849 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1850 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1851 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1852 {
1853 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1854 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1855 }
1856 else
1857 {
1858 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1859 Assert(cbInstr < cbMaxRead);
1860 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1861 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1862 }
1863 if (cbDst <= cbMaxRead)
1864 {
1865 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1866 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1867 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1868 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1869 return;
1870 }
1871 pVCpu->iem.s.pbInstrBuf = NULL;
1872
1873 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1874 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1875 }
1876 else
1877# endif
1878#if 0
1879 /*
1880 * If there is no special read handling, we can read a bit more and
1881 * put it in the prefetch buffer.
1882 */
1883 if ( cbDst < cbMaxRead
1884 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1885 {
1886 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1887 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1888 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1889 { /* likely */ }
1890 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1891 {
1892 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1893 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1894 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1895 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1896 }
1897 else
1898 {
1899 Log((RT_SUCCESS(rcStrict)
1900 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1901 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1902 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1903 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1904 }
1905 }
1906 /*
1907 * Special read handling, so only read exactly what's needed.
1908 * This is a highly unlikely scenario.
1909 */
1910 else
1911#endif
1912 {
1913 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1914 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1915 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1916 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1917 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1918 { /* likely */ }
1919 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1920 {
1921 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1922 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1923 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1924 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1925 }
1926 else
1927 {
1928 Log((RT_SUCCESS(rcStrict)
1929 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1930 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1931 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1932 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1933 }
1934 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1935 if (cbToRead == cbDst)
1936 return;
1937 }
1938
1939 /*
1940 * More to read, loop.
1941 */
1942 cbDst -= cbMaxRead;
1943 pvDst = (uint8_t *)pvDst + cbMaxRead;
1944 }
1945#else
1946 RT_NOREF(pvDst, cbDst);
1947 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1948#endif
1949}
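
/*
 * Illustrative sketch (not built): one way a caller could arm the jump buffer
 * before using the *Jmp opcode fetchers.  The function name and control flow are
 * hypothetical; the real dispatch code elsewhere in IEM also deals with nesting,
 * statistics and more.
 */
#if 0
static VBOXSTRICTRC iemExampleFetchFirstOpcodeByte(PVMCPU pVCpu, uint8_t *pbOpcode)
{
    jmp_buf         JmpBuf;                                     /* needs <setjmp.h> */
    jmp_buf * const pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
    pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;

    VBOXSTRICTRC rcStrict;
    int rc = setjmp(JmpBuf);
    if (rc == 0)
    {
        /* A fault raised by the fetcher longjmps straight back to the setjmp above. */
        *pbOpcode = iemOpcodeGetNextU8Jmp(pVCpu);
        rcStrict  = VINF_SUCCESS;
    }
    else
        rcStrict = rc;                                          /* the status passed to longjmp */

    pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
    return rcStrict;
}
#endif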
1950
1951#else
1952
1953/**
1954 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1955 * exception if it fails.
1956 *
1957 * @returns Strict VBox status code.
1958 * @param pVCpu The cross context virtual CPU structure of the
1959 * calling thread.
1960 * @param cbMin The minimum number of bytes relative to offOpcode
1961 * that must be read.
1962 */
1963IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1964{
1965 /*
1966 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1967 *
1968 * First translate CS:rIP to a physical address.
1969 */
1970 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1971 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1972 uint32_t cbToTryRead;
1973 RTGCPTR GCPtrNext;
1974 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1975 {
1976 cbToTryRead = PAGE_SIZE;
1977 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1978 if (!IEM_IS_CANONICAL(GCPtrNext))
1979 return iemRaiseGeneralProtectionFault0(pVCpu);
1980 }
1981 else
1982 {
1983 uint32_t GCPtrNext32 = pCtx->eip;
1984 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1985 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1986 if (GCPtrNext32 > pCtx->cs.u32Limit)
1987 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1988 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1989 if (!cbToTryRead) /* overflowed */
1990 {
1991 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1992 cbToTryRead = UINT32_MAX;
1993 /** @todo check out wrapping around the code segment. */
1994 }
1995 if (cbToTryRead < cbMin - cbLeft)
1996 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1997 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1998 }
1999
2000 /* Only read up to the end of the page, and make sure we don't read more
2001 than the opcode buffer can hold. */
2002 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
2003 if (cbToTryRead > cbLeftOnPage)
2004 cbToTryRead = cbLeftOnPage;
2005 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
2006 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
2007/** @todo r=bird: Convert assertion into undefined opcode exception? */
2008 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
2009
2010# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2011 /* Allow interpretation of patch manager code blocks since they can for
2012 instance throw #PFs for perfectly good reasons. */
2013 if (pVCpu->iem.s.fInPatchCode)
2014 {
2015 size_t cbRead = 0;
2016 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2017 AssertRCReturn(rc, rc);
2018 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2019 return VINF_SUCCESS;
2020 }
2021# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2022
2023 RTGCPHYS GCPhys;
2024 uint64_t fFlags;
2025 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2026 if (RT_FAILURE(rc))
2027 {
2028 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2029 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2030 }
2031 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2032 {
2033 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2034 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2035 }
2036 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
2037 {
2038 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2039 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2040 }
2041 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2042 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2043 /** @todo Check reserved bits and such stuff. PGM is better at doing
2044 * that, so do it when implementing the guest virtual address
2045 * TLB... */
2046
2047 /*
2048 * Read the bytes at this address.
2049 *
2050 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2051 * and since PATM should only patch the start of an instruction there
2052 * should be no need to check again here.
2053 */
2054 if (!pVCpu->iem.s.fBypassHandlers)
2055 {
2056 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2057 cbToTryRead, PGMACCESSORIGIN_IEM);
2058 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2059 { /* likely */ }
2060 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2061 {
2062 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2063 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2064 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2065 }
2066 else
2067 {
2068 Log((RT_SUCCESS(rcStrict)
2069 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2070 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2071 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2072 return rcStrict;
2073 }
2074 }
2075 else
2076 {
2077 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2078 if (RT_SUCCESS(rc))
2079 { /* likely */ }
2080 else
2081 {
2082 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2083 return rc;
2084 }
2085 }
2086 pVCpu->iem.s.cbOpcode += cbToTryRead;
2087 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2088
2089 return VINF_SUCCESS;
2090}
2091
2092#endif /* !IEM_WITH_CODE_TLB */
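
/*
 * Worked example (not built): the page clamping done by both prefetch paths above.
 * The address and the buffer numbers below are made up for illustration.
 */
#if 0
static uint32_t iemExamplePageClamp(void)
{
    /* GCPtrNext = 0x00401ffa: the offset into the page is 0xffa, so only six bytes
       remain before the page boundary and the read is cut down to that. */
    uint64_t const GCPtrNext    = UINT64_C(0x00401ffa);
    uint32_t       cbToTryRead  = UINT32_C(0x1000);
    uint32_t const cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);   /* = 6 */
    if (cbToTryRead > cbLeftOnPage)
        cbToTryRead = cbLeftOnPage;

    /* It is also limited to the free space left in the abOpcode buffer, here
       assumed to be 16 bytes with 3 already used. */
    uint32_t const cbBufFree = 16 - 3;
    if (cbToTryRead > cbBufFree)
        cbToTryRead = cbBufFree;
    return cbToTryRead;                                         /* = 6 */
}
#endif
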
2093#ifndef IEM_WITH_SETJMP
2094
2095/**
2096 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2097 *
2098 * @returns Strict VBox status code.
2099 * @param pVCpu The cross context virtual CPU structure of the
2100 * calling thread.
2101 * @param pb Where to return the opcode byte.
2102 */
2103DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2104{
2105 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2106 if (rcStrict == VINF_SUCCESS)
2107 {
2108 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2109 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2110 pVCpu->iem.s.offOpcode = offOpcode + 1;
2111 }
2112 else
2113 *pb = 0;
2114 return rcStrict;
2115}
2116
2117
2118/**
2119 * Fetches the next opcode byte.
2120 *
2121 * @returns Strict VBox status code.
2122 * @param pVCpu The cross context virtual CPU structure of the
2123 * calling thread.
2124 * @param pu8 Where to return the opcode byte.
2125 */
2126DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2127{
2128 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2129 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2130 {
2131 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2132 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2133 return VINF_SUCCESS;
2134 }
2135 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2136}
2137
2138#else /* IEM_WITH_SETJMP */
2139
2140/**
2141 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2142 *
2143 * @returns The opcode byte.
2144 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2145 */
2146DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2147{
2148# ifdef IEM_WITH_CODE_TLB
2149 uint8_t u8;
2150 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2151 return u8;
2152# else
2153 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2154 if (rcStrict == VINF_SUCCESS)
2155 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2156 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2157# endif
2158}
2159
2160
2161/**
2162 * Fetches the next opcode byte, longjmp on error.
2163 *
2164 * @returns The opcode byte.
2165 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2166 */
2167DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2168{
2169# ifdef IEM_WITH_CODE_TLB
2170 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2171 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2172 if (RT_LIKELY( pbBuf != NULL
2173 && offBuf < pVCpu->iem.s.cbInstrBuf))
2174 {
2175 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2176 return pbBuf[offBuf];
2177 }
2178# else
2179 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2180 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2181 {
2182 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2183 return pVCpu->iem.s.abOpcode[offOpcode];
2184 }
2185# endif
2186 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2187}
2188
2189#endif /* IEM_WITH_SETJMP */
2190
2191/**
2192 * Fetches the next opcode byte, returns automatically on failure.
2193 *
2194 * @param a_pu8 Where to return the opcode byte.
2195 * @remark Implicitly references pVCpu.
2196 */
2197#ifndef IEM_WITH_SETJMP
2198# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2199 do \
2200 { \
2201 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2202 if (rcStrict2 == VINF_SUCCESS) \
2203 { /* likely */ } \
2204 else \
2205 return rcStrict2; \
2206 } while (0)
2207#else
2208# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2209#endif /* IEM_WITH_SETJMP */
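
/*
 * Illustrative sketch (not built): how decoder code typically consumes opcode
 * bytes via the macro above.  In the non-setjmp build the macro hides an early
 * 'return' on failure, so it must only be used in functions returning
 * VBOXSTRICTRC.  The function below is hypothetical, not a real opcode handler.
 */
#if 0
static VBOXSTRICTRC iemExampleDecodeModRm(PVMCPU pVCpu, uint8_t *pbRm)
{
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm);   /* returns (or longjmps) on fetch failure */
    *pbRm = bRm;
    return VINF_SUCCESS;
}
#endif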
2210
2211
2212#ifndef IEM_WITH_SETJMP
2213/**
2214 * Fetches the next signed byte from the opcode stream.
2215 *
2216 * @returns Strict VBox status code.
2217 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2218 * @param pi8 Where to return the signed byte.
2219 */
2220DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2221{
2222 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2223}
2224#endif /* !IEM_WITH_SETJMP */
2225
2226
2227/**
2228 * Fetches the next signed byte from the opcode stream, returning automatically
2229 * on failure.
2230 *
2231 * @param a_pi8 Where to return the signed byte.
2232 * @remark Implicitly references pVCpu.
2233 */
2234#ifndef IEM_WITH_SETJMP
2235# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2236 do \
2237 { \
2238 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2239 if (rcStrict2 != VINF_SUCCESS) \
2240 return rcStrict2; \
2241 } while (0)
2242#else /* IEM_WITH_SETJMP */
2243# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2244
2245#endif /* IEM_WITH_SETJMP */
2246
2247#ifndef IEM_WITH_SETJMP
2248
2249/**
2250 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2251 *
2252 * @returns Strict VBox status code.
2253 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2254 * @param pu16 Where to return the opcode word.
2255 */
2256DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2257{
2258 uint8_t u8;
2259 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2260 if (rcStrict == VINF_SUCCESS)
2261 *pu16 = (int8_t)u8;
2262 return rcStrict;
2263}
2264
2265
2266/**
2267 * Fetches the next signed byte from the opcode stream, extending it to
2268 * unsigned 16-bit.
2269 *
2270 * @returns Strict VBox status code.
2271 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2272 * @param pu16 Where to return the unsigned word.
2273 */
2274DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2275{
2276 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2277 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2278 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2279
2280 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2281 pVCpu->iem.s.offOpcode = offOpcode + 1;
2282 return VINF_SUCCESS;
2283}
2284
2285#endif /* !IEM_WITH_SETJMP */
2286
2287/**
2288 * Fetches the next signed byte from the opcode stream and sign-extends it to
2289 * a word, returning automatically on failure.
2290 *
2291 * @param a_pu16 Where to return the word.
2292 * @remark Implicitly references pVCpu.
2293 */
2294#ifndef IEM_WITH_SETJMP
2295# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2296 do \
2297 { \
2298 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2299 if (rcStrict2 != VINF_SUCCESS) \
2300 return rcStrict2; \
2301 } while (0)
2302#else
2303# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2304#endif
2305
2306#ifndef IEM_WITH_SETJMP
2307
2308/**
2309 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2310 *
2311 * @returns Strict VBox status code.
2312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2313 * @param pu32 Where to return the opcode dword.
2314 */
2315DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2316{
2317 uint8_t u8;
2318 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2319 if (rcStrict == VINF_SUCCESS)
2320 *pu32 = (int8_t)u8;
2321 return rcStrict;
2322}
2323
2324
2325/**
2326 * Fetches the next signed byte from the opcode stream, extending it to
2327 * unsigned 32-bit.
2328 *
2329 * @returns Strict VBox status code.
2330 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2331 * @param pu32 Where to return the unsigned dword.
2332 */
2333DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2334{
2335 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2336 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2337 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2338
2339 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2340 pVCpu->iem.s.offOpcode = offOpcode + 1;
2341 return VINF_SUCCESS;
2342}
2343
2344#endif /* !IEM_WITH_SETJMP */
2345
2346/**
2347 * Fetches the next signed byte from the opcode stream and sign-extends it to
2348 * a double word, returning automatically on failure.
2349 *
2350 * @param a_pu32 Where to return the double word.
2351 * @remark Implicitly references pVCpu.
2352 */
2353#ifndef IEM_WITH_SETJMP
2354# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2355 do \
2356 { \
2357 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2358 if (rcStrict2 != VINF_SUCCESS) \
2359 return rcStrict2; \
2360 } while (0)
2361#else
2362# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2363#endif
2364
2365#ifndef IEM_WITH_SETJMP
2366
2367/**
2368 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2369 *
2370 * @returns Strict VBox status code.
2371 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2372 * @param pu64 Where to return the opcode qword.
2373 */
2374DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2375{
2376 uint8_t u8;
2377 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2378 if (rcStrict == VINF_SUCCESS)
2379 *pu64 = (int8_t)u8;
2380 return rcStrict;
2381}
2382
2383
2384/**
2385 * Fetches the next signed byte from the opcode stream, extending it to
2386 * unsigned 64-bit.
2387 *
2388 * @returns Strict VBox status code.
2389 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2390 * @param pu64 Where to return the unsigned qword.
2391 */
2392DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2393{
2394 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2395 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2396 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2397
2398 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2399 pVCpu->iem.s.offOpcode = offOpcode + 1;
2400 return VINF_SUCCESS;
2401}
2402
2403#endif /* !IEM_WITH_SETJMP */
2404
2405
2406/**
2407 * Fetches the next signed byte from the opcode stream and sign-extends it to
2408 * a quad word, returning automatically on failure.
2409 *
2410 * @param a_pu64 Where to return the quad word.
2411 * @remark Implicitly references pVCpu.
2412 */
2413#ifndef IEM_WITH_SETJMP
2414# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2415 do \
2416 { \
2417 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2418 if (rcStrict2 != VINF_SUCCESS) \
2419 return rcStrict2; \
2420 } while (0)
2421#else
2422# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2423#endif
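
/*
 * Worked example (not built): what the S8_SX_* fetchers above produce for a
 * negative immediate byte.  Plain standard C casts, results in the comments.
 */
#if 0
static void iemExampleSignExtendImm8(void)
{
    uint8_t const  bImm = 0x80;                     /* raw opcode byte, i.e. -128 */
    uint16_t const u16  = (uint16_t)(int8_t)bImm;   /* 0xff80 */
    uint32_t const u32  = (uint32_t)(int8_t)bImm;   /* 0xffffff80 */
    uint64_t const u64  = (uint64_t)(int8_t)bImm;   /* 0xffffffffffffff80 */
    /* A byte with the sign bit clear, e.g. 0x7f, is left unchanged by all three. */
    NOREF(u16); NOREF(u32); NOREF(u64);
}
#endif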
2424
2425
2426#ifndef IEM_WITH_SETJMP
2427
2428/**
2429 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2430 *
2431 * @returns Strict VBox status code.
2432 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2433 * @param pu16 Where to return the opcode word.
2434 */
2435DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2436{
2437 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2438 if (rcStrict == VINF_SUCCESS)
2439 {
2440 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2441# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2442 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2443# else
2444 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2445# endif
2446 pVCpu->iem.s.offOpcode = offOpcode + 2;
2447 }
2448 else
2449 *pu16 = 0;
2450 return rcStrict;
2451}
2452
2453
2454/**
2455 * Fetches the next opcode word.
2456 *
2457 * @returns Strict VBox status code.
2458 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2459 * @param pu16 Where to return the opcode word.
2460 */
2461DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2462{
2463 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2464 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2465 {
2466 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2467# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2468 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2469# else
2470 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2471# endif
2472 return VINF_SUCCESS;
2473 }
2474 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2475}
2476
2477#else /* IEM_WITH_SETJMP */
2478
2479/**
2480 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2481 *
2482 * @returns The opcode word.
2483 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2484 */
2485DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2486{
2487# ifdef IEM_WITH_CODE_TLB
2488 uint16_t u16;
2489 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2490 return u16;
2491# else
2492 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2493 if (rcStrict == VINF_SUCCESS)
2494 {
2495 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2496 pVCpu->iem.s.offOpcode += 2;
2497# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2498 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2499# else
2500 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2501# endif
2502 }
2503 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2504# endif
2505}
2506
2507
2508/**
2509 * Fetches the next opcode word, longjmp on error.
2510 *
2511 * @returns The opcode word.
2512 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2513 */
2514DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2515{
2516# ifdef IEM_WITH_CODE_TLB
2517 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2518 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2519 if (RT_LIKELY( pbBuf != NULL
2520 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2521 {
2522 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2523# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2524 return *(uint16_t const *)&pbBuf[offBuf];
2525# else
2526 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2527# endif
2528 }
2529# else
2530 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2531 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2532 {
2533 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2534# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2535 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2536# else
2537 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2538# endif
2539 }
2540# endif
2541 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2542}
2543
2544#endif /* IEM_WITH_SETJMP */
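
/*
 * Illustrative sketch (not built): the two ways the U16 fetchers above build a
 * word from the opcode buffer.  Both yield the same little-endian value, e.g.
 * the byte sequence 0x34 0x12 becomes 0x1234; the unaligned-read variant
 * additionally relies on a little-endian host.
 */
#if 0
static uint16_t iemExampleReadOpcodeU16(uint8_t const *pbOpcode)
{
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    return *(uint16_t const *)pbOpcode;                 /* direct unaligned load */
# else
    return RT_MAKE_U16(pbOpcode[0], pbOpcode[1]);       /* low byte first */
# endif
}
#endif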
2545
2546
2547/**
2548 * Fetches the next opcode word, returns automatically on failure.
2549 *
2550 * @param a_pu16 Where to return the opcode word.
2551 * @remark Implicitly references pVCpu.
2552 */
2553#ifndef IEM_WITH_SETJMP
2554# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2555 do \
2556 { \
2557 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2558 if (rcStrict2 != VINF_SUCCESS) \
2559 return rcStrict2; \
2560 } while (0)
2561#else
2562# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2563#endif
2564
2565#ifndef IEM_WITH_SETJMP
2566
2567/**
2568 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2569 *
2570 * @returns Strict VBox status code.
2571 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2572 * @param pu32 Where to return the opcode double word.
2573 */
2574DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2575{
2576 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2577 if (rcStrict == VINF_SUCCESS)
2578 {
2579 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2580 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2581 pVCpu->iem.s.offOpcode = offOpcode + 2;
2582 }
2583 else
2584 *pu32 = 0;
2585 return rcStrict;
2586}
2587
2588
2589/**
2590 * Fetches the next opcode word, zero extending it to a double word.
2591 *
2592 * @returns Strict VBox status code.
2593 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2594 * @param pu32 Where to return the opcode double word.
2595 */
2596DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2597{
2598 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2599 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2600 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2601
2602 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2603 pVCpu->iem.s.offOpcode = offOpcode + 2;
2604 return VINF_SUCCESS;
2605}
2606
2607#endif /* !IEM_WITH_SETJMP */
2608
2609
2610/**
2611 * Fetches the next opcode word and zero extends it to a double word, returns
2612 * automatically on failure.
2613 *
2614 * @param a_pu32 Where to return the opcode double word.
2615 * @remark Implicitly references pVCpu.
2616 */
2617#ifndef IEM_WITH_SETJMP
2618# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2619 do \
2620 { \
2621 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2622 if (rcStrict2 != VINF_SUCCESS) \
2623 return rcStrict2; \
2624 } while (0)
2625#else
2626# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2627#endif
2628
2629#ifndef IEM_WITH_SETJMP
2630
2631/**
2632 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2633 *
2634 * @returns Strict VBox status code.
2635 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2636 * @param pu64 Where to return the opcode quad word.
2637 */
2638DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2639{
2640 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2641 if (rcStrict == VINF_SUCCESS)
2642 {
2643 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2644 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2645 pVCpu->iem.s.offOpcode = offOpcode + 2;
2646 }
2647 else
2648 *pu64 = 0;
2649 return rcStrict;
2650}
2651
2652
2653/**
2654 * Fetches the next opcode word, zero extending it to a quad word.
2655 *
2656 * @returns Strict VBox status code.
2657 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2658 * @param pu64 Where to return the opcode quad word.
2659 */
2660DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2661{
2662 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2663 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2664 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2665
2666 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2667 pVCpu->iem.s.offOpcode = offOpcode + 2;
2668 return VINF_SUCCESS;
2669}
2670
2671#endif /* !IEM_WITH_SETJMP */
2672
2673/**
2674 * Fetches the next opcode word and zero extends it to a quad word, returns
2675 * automatically on failure.
2676 *
2677 * @param a_pu64 Where to return the opcode quad word.
2678 * @remark Implicitly references pVCpu.
2679 */
2680#ifndef IEM_WITH_SETJMP
2681# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2682 do \
2683 { \
2684 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2685 if (rcStrict2 != VINF_SUCCESS) \
2686 return rcStrict2; \
2687 } while (0)
2688#else
2689# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2690#endif
2691
2692
2693#ifndef IEM_WITH_SETJMP
2694/**
2695 * Fetches the next signed word from the opcode stream.
2696 *
2697 * @returns Strict VBox status code.
2698 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2699 * @param pi16 Where to return the signed word.
2700 */
2701DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2702{
2703 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2704}
2705#endif /* !IEM_WITH_SETJMP */
2706
2707
2708/**
2709 * Fetches the next signed word from the opcode stream, returning automatically
2710 * on failure.
2711 *
2712 * @param a_pi16 Where to return the signed word.
2713 * @remark Implicitly references pVCpu.
2714 */
2715#ifndef IEM_WITH_SETJMP
2716# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2717 do \
2718 { \
2719 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2720 if (rcStrict2 != VINF_SUCCESS) \
2721 return rcStrict2; \
2722 } while (0)
2723#else
2724# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2725#endif
2726
2727#ifndef IEM_WITH_SETJMP
2728
2729/**
2730 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2731 *
2732 * @returns Strict VBox status code.
2733 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2734 * @param pu32 Where to return the opcode dword.
2735 */
2736DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2737{
2738 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2739 if (rcStrict == VINF_SUCCESS)
2740 {
2741 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2742# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2743 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2744# else
2745 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2746 pVCpu->iem.s.abOpcode[offOpcode + 1],
2747 pVCpu->iem.s.abOpcode[offOpcode + 2],
2748 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2749# endif
2750 pVCpu->iem.s.offOpcode = offOpcode + 4;
2751 }
2752 else
2753 *pu32 = 0;
2754 return rcStrict;
2755}
2756
2757
2758/**
2759 * Fetches the next opcode dword.
2760 *
2761 * @returns Strict VBox status code.
2762 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2763 * @param pu32 Where to return the opcode double word.
2764 */
2765DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2766{
2767 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2768 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2769 {
2770 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2771# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2772 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2773# else
2774 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2775 pVCpu->iem.s.abOpcode[offOpcode + 1],
2776 pVCpu->iem.s.abOpcode[offOpcode + 2],
2777 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2778# endif
2779 return VINF_SUCCESS;
2780 }
2781 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2782}
2783
2784#else /* IEM_WITH_SETJMP */
2785
2786/**
2787 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2788 *
2789 * @returns The opcode dword.
2790 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2791 */
2792DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2793{
2794# ifdef IEM_WITH_CODE_TLB
2795 uint32_t u32;
2796 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2797 return u32;
2798# else
2799 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2800 if (rcStrict == VINF_SUCCESS)
2801 {
2802 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2803 pVCpu->iem.s.offOpcode = offOpcode + 4;
2804# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2805 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2806# else
2807 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2808 pVCpu->iem.s.abOpcode[offOpcode + 1],
2809 pVCpu->iem.s.abOpcode[offOpcode + 2],
2810 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2811# endif
2812 }
2813 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2814# endif
2815}
2816
2817
2818/**
2819 * Fetches the next opcode dword, longjmp on error.
2820 *
2821 * @returns The opcode dword.
2822 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2823 */
2824DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2825{
2826# ifdef IEM_WITH_CODE_TLB
2827 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2828 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2829 if (RT_LIKELY( pbBuf != NULL
2830 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2831 {
2832 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2833# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2834 return *(uint32_t const *)&pbBuf[offBuf];
2835# else
2836 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2837 pbBuf[offBuf + 1],
2838 pbBuf[offBuf + 2],
2839 pbBuf[offBuf + 3]);
2840# endif
2841 }
2842# else
2843 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2844 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2845 {
2846 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2847# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2848 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2849# else
2850 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2851 pVCpu->iem.s.abOpcode[offOpcode + 1],
2852 pVCpu->iem.s.abOpcode[offOpcode + 2],
2853 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2854# endif
2855 }
2856# endif
2857 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2858}
2859
2860#endif /* IEM_WITH_SETJMP */
2861
2862
2863/**
2864 * Fetches the next opcode dword, returns automatically on failure.
2865 *
2866 * @param a_pu32 Where to return the opcode dword.
2867 * @remark Implicitly references pVCpu.
2868 */
2869#ifndef IEM_WITH_SETJMP
2870# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2871 do \
2872 { \
2873 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2874 if (rcStrict2 != VINF_SUCCESS) \
2875 return rcStrict2; \
2876 } while (0)
2877#else
2878# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2879#endif
2880
2881#ifndef IEM_WITH_SETJMP
2882
2883/**
2884 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2885 *
2886 * @returns Strict VBox status code.
2887 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2888 * @param pu64 Where to return the opcode quad word.
2889 */
2890DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2891{
2892 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2893 if (rcStrict == VINF_SUCCESS)
2894 {
2895 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2896 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2897 pVCpu->iem.s.abOpcode[offOpcode + 1],
2898 pVCpu->iem.s.abOpcode[offOpcode + 2],
2899 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2900 pVCpu->iem.s.offOpcode = offOpcode + 4;
2901 }
2902 else
2903 *pu64 = 0;
2904 return rcStrict;
2905}
2906
2907
2908/**
2909 * Fetches the next opcode dword, zero extending it to a quad word.
2910 *
2911 * @returns Strict VBox status code.
2912 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2913 * @param pu64 Where to return the opcode quad word.
2914 */
2915DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2916{
2917 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2918 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2919 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2920
2921 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2922 pVCpu->iem.s.abOpcode[offOpcode + 1],
2923 pVCpu->iem.s.abOpcode[offOpcode + 2],
2924 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2925 pVCpu->iem.s.offOpcode = offOpcode + 4;
2926 return VINF_SUCCESS;
2927}
2928
2929#endif /* !IEM_WITH_SETJMP */
2930
2931
2932/**
2933 * Fetches the next opcode dword and zero extends it to a quad word, returns
2934 * automatically on failure.
2935 *
2936 * @param a_pu64 Where to return the opcode quad word.
2937 * @remark Implicitly references pVCpu.
2938 */
2939#ifndef IEM_WITH_SETJMP
2940# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2941 do \
2942 { \
2943 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2944 if (rcStrict2 != VINF_SUCCESS) \
2945 return rcStrict2; \
2946 } while (0)
2947#else
2948# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2949#endif
2950
2951
2952#ifndef IEM_WITH_SETJMP
2953/**
2954 * Fetches the next signed double word from the opcode stream.
2955 *
2956 * @returns Strict VBox status code.
2957 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2958 * @param pi32 Where to return the signed double word.
2959 */
2960DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2961{
2962 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2963}
2964#endif
2965
2966/**
2967 * Fetches the next signed double word from the opcode stream, returning
2968 * automatically on failure.
2969 *
2970 * @param a_pi32 Where to return the signed double word.
2971 * @remark Implicitly references pVCpu.
2972 */
2973#ifndef IEM_WITH_SETJMP
2974# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2975 do \
2976 { \
2977 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2978 if (rcStrict2 != VINF_SUCCESS) \
2979 return rcStrict2; \
2980 } while (0)
2981#else
2982# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2983#endif
2984
2985#ifndef IEM_WITH_SETJMP
2986
2987/**
2988 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2989 *
2990 * @returns Strict VBox status code.
2991 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2992 * @param pu64 Where to return the opcode qword.
2993 */
2994DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2995{
2996 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2997 if (rcStrict == VINF_SUCCESS)
2998 {
2999 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3000 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3001 pVCpu->iem.s.abOpcode[offOpcode + 1],
3002 pVCpu->iem.s.abOpcode[offOpcode + 2],
3003 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3004 pVCpu->iem.s.offOpcode = offOpcode + 4;
3005 }
3006 else
3007 *pu64 = 0;
3008 return rcStrict;
3009}
3010
3011
3012/**
3013 * Fetches the next opcode dword, sign extending it into a quad word.
3014 *
3015 * @returns Strict VBox status code.
3016 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3017 * @param pu64 Where to return the opcode quad word.
3018 */
3019DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3020{
3021 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3022 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3023 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3024
3025 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3026 pVCpu->iem.s.abOpcode[offOpcode + 1],
3027 pVCpu->iem.s.abOpcode[offOpcode + 2],
3028 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3029 *pu64 = i32;
3030 pVCpu->iem.s.offOpcode = offOpcode + 4;
3031 return VINF_SUCCESS;
3032}
3033
3034#endif /* !IEM_WITH_SETJMP */
3035
3036
3037/**
3038 * Fetches the next opcode double word and sign extends it to a quad word,
3039 * returns automatically on failure.
3040 *
3041 * @param a_pu64 Where to return the opcode quad word.
3042 * @remark Implicitly references pVCpu.
3043 */
3044#ifndef IEM_WITH_SETJMP
3045# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3046 do \
3047 { \
3048 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3049 if (rcStrict2 != VINF_SUCCESS) \
3050 return rcStrict2; \
3051 } while (0)
3052#else
3053# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3054#endif
3055
3056#ifndef IEM_WITH_SETJMP
3057
3058/**
3059 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3060 *
3061 * @returns Strict VBox status code.
3062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3063 * @param pu64 Where to return the opcode qword.
3064 */
3065DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3066{
3067 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3068 if (rcStrict == VINF_SUCCESS)
3069 {
3070 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3071# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3072 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3073# else
3074 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3075 pVCpu->iem.s.abOpcode[offOpcode + 1],
3076 pVCpu->iem.s.abOpcode[offOpcode + 2],
3077 pVCpu->iem.s.abOpcode[offOpcode + 3],
3078 pVCpu->iem.s.abOpcode[offOpcode + 4],
3079 pVCpu->iem.s.abOpcode[offOpcode + 5],
3080 pVCpu->iem.s.abOpcode[offOpcode + 6],
3081 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3082# endif
3083 pVCpu->iem.s.offOpcode = offOpcode + 8;
3084 }
3085 else
3086 *pu64 = 0;
3087 return rcStrict;
3088}
3089
3090
3091/**
3092 * Fetches the next opcode qword.
3093 *
3094 * @returns Strict VBox status code.
3095 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3096 * @param pu64 Where to return the opcode qword.
3097 */
3098DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3099{
3100 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3101 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3102 {
3103# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3104 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3105# else
3106 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3107 pVCpu->iem.s.abOpcode[offOpcode + 1],
3108 pVCpu->iem.s.abOpcode[offOpcode + 2],
3109 pVCpu->iem.s.abOpcode[offOpcode + 3],
3110 pVCpu->iem.s.abOpcode[offOpcode + 4],
3111 pVCpu->iem.s.abOpcode[offOpcode + 5],
3112 pVCpu->iem.s.abOpcode[offOpcode + 6],
3113 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3114# endif
3115 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3116 return VINF_SUCCESS;
3117 }
3118 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3119}
3120
3121#else /* IEM_WITH_SETJMP */
3122
3123/**
3124 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3125 *
3126 * @returns The opcode qword.
3127 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3128 */
3129DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3130{
3131# ifdef IEM_WITH_CODE_TLB
3132 uint64_t u64;
3133 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3134 return u64;
3135# else
3136 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3137 if (rcStrict == VINF_SUCCESS)
3138 {
3139 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3140 pVCpu->iem.s.offOpcode = offOpcode + 8;
3141# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3142 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3143# else
3144 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3145 pVCpu->iem.s.abOpcode[offOpcode + 1],
3146 pVCpu->iem.s.abOpcode[offOpcode + 2],
3147 pVCpu->iem.s.abOpcode[offOpcode + 3],
3148 pVCpu->iem.s.abOpcode[offOpcode + 4],
3149 pVCpu->iem.s.abOpcode[offOpcode + 5],
3150 pVCpu->iem.s.abOpcode[offOpcode + 6],
3151 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3152# endif
3153 }
3154 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3155# endif
3156}
3157
3158
3159/**
3160 * Fetches the next opcode qword, longjmp on error.
3161 *
3162 * @returns The opcode qword.
3163 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3164 */
3165DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3166{
3167# ifdef IEM_WITH_CODE_TLB
3168 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3169 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3170 if (RT_LIKELY( pbBuf != NULL
3171 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3172 {
3173 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3174# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3175 return *(uint64_t const *)&pbBuf[offBuf];
3176# else
3177 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3178 pbBuf[offBuf + 1],
3179 pbBuf[offBuf + 2],
3180 pbBuf[offBuf + 3],
3181 pbBuf[offBuf + 4],
3182 pbBuf[offBuf + 5],
3183 pbBuf[offBuf + 6],
3184 pbBuf[offBuf + 7]);
3185# endif
3186 }
3187# else
3188 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3189 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3190 {
3191 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3192# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3193 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3194# else
3195 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3196 pVCpu->iem.s.abOpcode[offOpcode + 1],
3197 pVCpu->iem.s.abOpcode[offOpcode + 2],
3198 pVCpu->iem.s.abOpcode[offOpcode + 3],
3199 pVCpu->iem.s.abOpcode[offOpcode + 4],
3200 pVCpu->iem.s.abOpcode[offOpcode + 5],
3201 pVCpu->iem.s.abOpcode[offOpcode + 6],
3202 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3203# endif
3204 }
3205# endif
3206 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3207}
3208
3209#endif /* IEM_WITH_SETJMP */
3210
3211/**
3212 * Fetches the next opcode quad word, returns automatically on failure.
3213 *
3214 * @param a_pu64 Where to return the opcode quad word.
3215 * @remark Implicitly references pVCpu.
3216 */
3217#ifndef IEM_WITH_SETJMP
3218# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3219 do \
3220 { \
3221 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3222 if (rcStrict2 != VINF_SUCCESS) \
3223 return rcStrict2; \
3224 } while (0)
3225#else
3226# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3227#endif
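/*
 * Illustrative use only (hypothetical local names): an instruction decoder
 * body, which has pVCpu in scope, would typically pull a 64-bit immediate
 * roughly like this, with the macro either returning the strict status code
 * or longjmp'ing on a fetch failure depending on IEM_WITH_SETJMP:
 *
 *     uint64_t u64Imm;
 *     IEM_OPCODE_GET_NEXT_U64(&u64Imm);
 *     // ... use u64Imm ...
 */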
3228
3229
3230/** @name Misc Worker Functions.
3231 * @{
3232 */
3233
3234/**
3235 * Gets the exception class for the specified exception vector.
3236 *
3237 * @returns The class of the specified exception.
3238 * @param uVector The exception vector.
3239 */
3240IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3241{
3242 Assert(uVector <= X86_XCPT_LAST);
3243 switch (uVector)
3244 {
3245 case X86_XCPT_DE:
3246 case X86_XCPT_TS:
3247 case X86_XCPT_NP:
3248 case X86_XCPT_SS:
3249 case X86_XCPT_GP:
3250 case X86_XCPT_SX: /* AMD only */
3251 return IEMXCPTCLASS_CONTRIBUTORY;
3252
3253 case X86_XCPT_PF:
3254 case X86_XCPT_VE: /* Intel only */
3255 return IEMXCPTCLASS_PAGE_FAULT;
3256 }
3257 return IEMXCPTCLASS_BENIGN;
3258}
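/*
 * For reference: #DE, #TS, #NP, #SS, #GP (and AMD's #SX) are classed as
 * contributory, #PF and Intel's #VE as page faults, and everything else
 * (#DB, #BP, #UD, #NM, #MF, #AC, ...) as benign.  The pairing of the two
 * classes is what drives the #DF / triple fault decisions below.
 */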
3259
3260
3261/**
3262 * Evaluates how to handle an exception caused during delivery of another event
3263 * (exception / interrupt).
3264 *
3265 * @returns How to handle the recursive exception.
3266 * @param pVCpu The cross context virtual CPU structure of the
3267 * calling thread.
3268 * @param fPrevFlags The flags of the previous event.
3269 * @param uPrevVector The vector of the previous event.
3270 * @param fCurFlags The flags of the current exception.
3271 * @param uCurVector The vector of the current exception.
3272 * @param pfXcptRaiseInfo Where to store additional information about the
3273 * exception condition. Optional.
3274 */
3275VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3276 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3277{
3278 /*
3279 * Only CPU exceptions can be raised while delivering other events; software-interrupt
3280 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3281 */
3282 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3283 Assert(pVCpu); RT_NOREF(pVCpu);
3284
3285 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3286 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3287 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3288 {
3289 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3290 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3291 {
3292 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3293 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3294 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3295 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3296 {
3297 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3298 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3299 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3300 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3301 uCurVector, IEM_GET_CTX(pVCpu)->cr2));
3302 }
3303 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3304 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3305 {
3306 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3307 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%u uCurVector=%u -> #DF\n", uPrevVector, uCurVector));
3308 }
3309 else if ( uPrevVector == X86_XCPT_DF
3310 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3311 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3312 {
3313 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3314 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3315 }
3316 }
3317 else
3318 {
3319 if (uPrevVector == X86_XCPT_NMI)
3320 {
3321 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3322 if (uCurVector == X86_XCPT_PF)
3323 {
3324 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3325 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3326 }
3327 }
3328 else if ( uPrevVector == X86_XCPT_AC
3329 && uCurVector == X86_XCPT_AC)
3330 {
3331 enmRaise = IEMXCPTRAISE_CPU_HANG;
3332 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3333 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3334 }
3335 }
3336 }
3337 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3338 {
3339 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3340 if (uCurVector == X86_XCPT_PF)
3341 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3342 }
3343 else
3344 {
3345 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3346 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3347 }
3348
3349 if (pfXcptRaiseInfo)
3350 *pfXcptRaiseInfo = fRaiseInfo;
3351 return enmRaise;
3352}
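/*
 * Illustrative example (hypothetical local names): a #PF raised while
 * delivering an earlier #PF would be evaluated by a caller roughly like
 * this and yield IEMXCPTRAISE_DOUBLE_FAULT with IEMXCPTRAISEINFO_PF_PF:
 *
 *     IEMXCPTRAISEINFO fInfo;
 *     IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                      &fInfo);
 */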
3353
3354
3355/**
3356 * Enters the CPU shutdown state initiated by a triple fault or other
3357 * unrecoverable conditions.
3358 *
3359 * @returns Strict VBox status code.
3360 * @param pVCpu The cross context virtual CPU structure of the
3361 * calling thread.
3362 */
3363IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3364{
3365 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3366 {
3367 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3368 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3369 }
3370
3371 RT_NOREF(pVCpu);
3372 return VINF_EM_TRIPLE_FAULT;
3373}
3374
3375
3376#ifdef VBOX_WITH_NESTED_HWVIRT
3377IEM_STATIC VBOXSTRICTRC iemHandleSvmNstGstEventIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t u8Vector, uint32_t fFlags,
3378 uint32_t uErr, uint64_t uCr2)
3379{
3380 Assert(IEM_IS_SVM_ENABLED(pVCpu));
3381
3382 /*
3383 * Handle nested-guest SVM exception and software interrupt intercepts,
3384 * see AMD spec. 15.12 "Exception Intercepts".
3385 *
3386 * - NMI intercepts have their own exit code and do not cause SVM_EXIT_EXCEPTION_2 #VMEXITs.
3387 * - External interrupts and software interrupts (INTn instruction) do not check the exception intercepts
3388 * even when they use a vector in the range 0 to 31.
3389 * - ICEBP should not trigger #DB intercept, but its own intercept.
3390 * - For #PF exceptions, the intercept is checked before CR2 is written by the exception.
3391 */
3392 /* Check NMI intercept */
3393 if ( u8Vector == X86_XCPT_NMI
3394 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3395 && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_NMI))
3396 {
3397 Log2(("iemHandleSvmNstGstEventIntercept: NMI intercept -> #VMEXIT\n"));
3398 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3399 }
3400
3401 /* Check ICEBP intercept. */
3402 if ( (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)
3403 && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_ICEBP))
3404 {
3405 Log2(("iemHandleSvmNstGstEventIntercept: ICEBP intercept -> #VMEXIT\n"));
3406 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_ICEBP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3407 }
3408
3409 /* Check CPU exception intercepts. */
3410 if ( (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3411 && IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, u8Vector))
3412 {
3413 Assert(u8Vector <= X86_XCPT_LAST);
3414 uint64_t const uExitInfo1 = fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0;
3415 uint64_t const uExitInfo2 = fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0;
3416 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist
3417 && u8Vector == X86_XCPT_PF
3418 && !(uErr & X86_TRAP_PF_ID))
3419 {
3420 /** @todo Nested-guest SVM - figure out fetching op-code bytes from IEM. */
3421#ifdef IEM_WITH_CODE_TLB
3422#else
3423 uint8_t const offOpCode = pVCpu->iem.s.offOpcode;
3424 uint8_t const cbCurrent = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode;
3425 if ( cbCurrent > 0
3426 && cbCurrent < sizeof(pCtx->hwvirt.svm.VmcbCtrl.abInstr))
3427 {
3428 Assert(cbCurrent <= sizeof(pVCpu->iem.s.abOpcode));
3429 memcpy(&pCtx->hwvirt.svm.VmcbCtrl.abInstr[0], &pVCpu->iem.s.abOpcode[offOpCode], cbCurrent);
3430 }
3431#endif
3432 }
3433 Log2(("iemHandleSvmNstGstEventIntercept: Xcpt intercept. u8Vector=%#x uExitInfo1=%#RX64, uExitInfo2=%#RX64 -> #VMEXIT\n",
3434 u8Vector, uExitInfo1, uExitInfo2));
3435 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + u8Vector, uExitInfo1, uExitInfo2);
3436 }
3437
3438 /* Check software interrupt (INTn) intercepts. */
3439 if ( (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3440 | IEM_XCPT_FLAGS_BP_INSTR
3441 | IEM_XCPT_FLAGS_ICEBP_INSTR
3442 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3443 && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INTN))
3444 {
3445 uint64_t const uExitInfo1 = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? u8Vector : 0;
3446 Log2(("iemHandleSvmNstGstEventIntercept: Software INT intercept (u8Vector=%#x) -> #VMEXIT\n", u8Vector));
3447 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SWINT, uExitInfo1, 0 /* uExitInfo2 */);
3448 }
3449
3450 return VINF_HM_INTERCEPT_NOT_ACTIVE;
3451}
3452#endif
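/*
 * Note on the ordering above: NMI is checked first, then ICEBP, then the
 * per-vector exception intercepts (with the decode-assist #PF special case),
 * and finally the INTn intercept; the first one that hits produces the
 * #VMEXIT.
 */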
3453
3454/**
3455 * Validates a new SS segment.
3456 *
3457 * @returns VBox strict status code.
3458 * @param pVCpu The cross context virtual CPU structure of the
3459 * calling thread.
3460 * @param pCtx The CPU context.
3461 * @param NewSS The new SS selector.
3462 * @param uCpl The CPL to load the stack for.
3463 * @param pDesc Where to return the descriptor.
3464 */
3465IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3466{
3467 NOREF(pCtx);
3468
3469 /* Null selectors are not allowed (we're not called for dispatching
3470 interrupts with SS=0 in long mode). */
3471 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3472 {
3473 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3474 return iemRaiseTaskSwitchFault0(pVCpu);
3475 }
3476
3477 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3478 if ((NewSS & X86_SEL_RPL) != uCpl)
3479 {
3480 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3481 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3482 }
3483
3484 /*
3485 * Read the descriptor.
3486 */
3487 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3488 if (rcStrict != VINF_SUCCESS)
3489 return rcStrict;
3490
3491 /*
3492 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3493 */
3494 if (!pDesc->Legacy.Gen.u1DescType)
3495 {
3496 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3497 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3498 }
3499
3500 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3501 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3502 {
3503 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3504 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3505 }
3506 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3507 {
3508 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3509 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3510 }
3511
3512 /* Is it there? */
3513 /** @todo testcase: Is this checked before the canonical / limit check below? */
3514 if (!pDesc->Legacy.Gen.u1Present)
3515 {
3516 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3517 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3518 }
3519
3520 return VINF_SUCCESS;
3521}
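/*
 * Note the check order above: null selector, RPL vs CPL, then the descriptor
 * type/writability and DPL checks (all #TS), and only then the present bit
 * (#NP), mirroring the validation sequence documented for LSS, POP SS and
 * MOV SS.
 */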
3522
3523
3524/**
3525 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3526 * not.
3527 *
3528 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3529 * @param a_pCtx The CPU context.
3530 */
3531#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3532# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3533 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3534 ? (a_pCtx)->eflags.u \
3535 : CPUMRawGetEFlags(a_pVCpu) )
3536#else
3537# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3538 ( (a_pCtx)->eflags.u )
3539#endif
3540
3541/**
3542 * Updates the EFLAGS in the correct manner wrt. PATM.
3543 *
3544 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3545 * @param a_pCtx The CPU context.
3546 * @param a_fEfl The new EFLAGS.
3547 */
3548#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3549# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3550 do { \
3551 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3552 (a_pCtx)->eflags.u = (a_fEfl); \
3553 else \
3554 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3555 } while (0)
3556#else
3557# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3558 do { \
3559 (a_pCtx)->eflags.u = (a_fEfl); \
3560 } while (0)
3561#endif
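/*
 * Illustrative use only: raw-mode (PATM) may keep parts of EFLAGS on the
 * side, so the exception code below always goes through these wrappers
 * rather than poking pCtx->eflags directly, roughly like this:
 *
 *     uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
 *     fEfl &= ~X86_EFL_IF;
 *     IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
 */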
3562
3563
3564/** @} */
3565
3566/** @name Raising Exceptions.
3567 *
3568 * @{
3569 */
3570
3571
3572/**
3573 * Loads the specified stack far pointer from the TSS.
3574 *
3575 * @returns VBox strict status code.
3576 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3577 * @param pCtx The CPU context.
3578 * @param uCpl The CPL to load the stack for.
3579 * @param pSelSS Where to return the new stack segment.
3580 * @param puEsp Where to return the new stack pointer.
3581 */
3582IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3583 PRTSEL pSelSS, uint32_t *puEsp)
3584{
3585 VBOXSTRICTRC rcStrict;
3586 Assert(uCpl < 4);
3587
3588 switch (pCtx->tr.Attr.n.u4Type)
3589 {
3590 /*
3591 * 16-bit TSS (X86TSS16).
3592 */
3593 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); /* fall thru */
3594 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3595 {
3596 uint32_t off = uCpl * 4 + 2;
3597 if (off + 4 <= pCtx->tr.u32Limit)
3598 {
3599 /** @todo check actual access pattern here. */
3600 uint32_t u32Tmp = 0; /* gcc maybe... */
3601 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3602 if (rcStrict == VINF_SUCCESS)
3603 {
3604 *puEsp = RT_LOWORD(u32Tmp);
3605 *pSelSS = RT_HIWORD(u32Tmp);
3606 return VINF_SUCCESS;
3607 }
3608 }
3609 else
3610 {
3611 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3612 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3613 }
3614 break;
3615 }
3616
3617 /*
3618 * 32-bit TSS (X86TSS32).
3619 */
3620 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); /* fall thru */
3621 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3622 {
3623 uint32_t off = uCpl * 8 + 4;
3624 if (off + 7 <= pCtx->tr.u32Limit)
3625 {
3626/** @todo check actual access pattern here. */
3627 uint64_t u64Tmp;
3628 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3629 if (rcStrict == VINF_SUCCESS)
3630 {
3631 *puEsp = u64Tmp & UINT32_MAX;
3632 *pSelSS = (RTSEL)(u64Tmp >> 32);
3633 return VINF_SUCCESS;
3634 }
3635 }
3636 else
3637 {
3638 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3639 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3640 }
3641 break;
3642 }
3643
3644 default:
3645 AssertFailed();
3646 rcStrict = VERR_IEM_IPE_4;
3647 break;
3648 }
3649
3650 *puEsp = 0; /* make gcc happy */
3651 *pSelSS = 0; /* make gcc happy */
3652 return rcStrict;
3653}
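/*
 * For reference, the offsets above follow from the TSS layouts: the 16-bit
 * TSS stores the {sp0,ss0}..{sp2,ss2} pairs as 2+2 byte fields starting at
 * offset 2 (hence uCpl * 4 + 2), while the 32-bit TSS stores {esp0,ss0} etc.
 * as 4+4 byte fields starting at offset 4 (hence uCpl * 8 + 4).
 */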
3654
3655
3656/**
3657 * Loads the specified stack pointer from the 64-bit TSS.
3658 *
3659 * @returns VBox strict status code.
3660 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3661 * @param pCtx The CPU context.
3662 * @param uCpl The CPL to load the stack for.
3663 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3664 * @param puRsp Where to return the new stack pointer.
3665 */
3666IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3667{
3668 Assert(uCpl < 4);
3669 Assert(uIst < 8);
3670 *puRsp = 0; /* make gcc happy */
3671
3672 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3673
3674 uint32_t off;
3675 if (uIst)
3676 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3677 else
3678 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3679 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3680 {
3681 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3682 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3683 }
3684
3685 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3686}
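/*
 * For reference, the 64-bit TSS keeps RSP0..RSP2 as 8-byte fields starting
 * at offset 4 and IST1..IST7 starting at offset 36, which is what the
 * RT_OFFSETOF() arithmetic above resolves to.
 */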
3687
3688
3689/**
3690 * Adjust the CPU state according to the exception being raised.
3691 *
3692 * @param pCtx The CPU context.
3693 * @param u8Vector The exception that has been raised.
3694 */
3695DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3696{
3697 switch (u8Vector)
3698 {
3699 case X86_XCPT_DB:
3700 pCtx->dr[7] &= ~X86_DR7_GD;
3701 break;
3702 /** @todo Read the AMD and Intel exception reference... */
3703 }
3704}
3705
3706
3707/**
3708 * Implements exceptions and interrupts for real mode.
3709 *
3710 * @returns VBox strict status code.
3711 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3712 * @param pCtx The CPU context.
3713 * @param cbInstr The number of bytes to offset rIP by in the return
3714 * address.
3715 * @param u8Vector The interrupt / exception vector number.
3716 * @param fFlags The flags.
3717 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3718 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3719 */
3720IEM_STATIC VBOXSTRICTRC
3721iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3722 PCPUMCTX pCtx,
3723 uint8_t cbInstr,
3724 uint8_t u8Vector,
3725 uint32_t fFlags,
3726 uint16_t uErr,
3727 uint64_t uCr2)
3728{
3729 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3730 NOREF(uErr); NOREF(uCr2);
3731
3732 /*
3733 * Read the IDT entry.
3734 */
3735 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3736 {
3737 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3738 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3739 }
3740 RTFAR16 Idte;
3741 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3742 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3743 return rcStrict;
3744
3745 /*
3746 * Push the stack frame.
3747 */
3748 uint16_t *pu16Frame;
3749 uint64_t uNewRsp;
3750 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3751 if (rcStrict != VINF_SUCCESS)
3752 return rcStrict;
3753
3754 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3755#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3756 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3757 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3758 fEfl |= UINT16_C(0xf000);
3759#endif
3760 pu16Frame[2] = (uint16_t)fEfl;
3761 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3762 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3763 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3764 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3765 return rcStrict;
3766
3767 /*
3768 * Load the vector address into cs:ip and make exception specific state
3769 * adjustments.
3770 */
3771 pCtx->cs.Sel = Idte.sel;
3772 pCtx->cs.ValidSel = Idte.sel;
3773 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3774 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3775 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3776 pCtx->rip = Idte.off;
3777 fEfl &= ~X86_EFL_IF;
3778 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3779
3780 /** @todo do we actually do this in real mode? */
3781 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3782 iemRaiseXcptAdjustState(pCtx, u8Vector);
3783
3784 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3785}
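/*
 * For reference: in real mode the IDTR covers the classic IVT, an array of
 * 4-byte far pointers (16-bit offset followed by 16-bit segment), and the
 * frame pushed above is the usual FLAGS, CS, IP sequence that IRET later
 * pops back off.
 */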
3786
3787
3788/**
3789 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3790 *
3791 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3792 * @param pSReg Pointer to the segment register.
3793 */
3794IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3795{
3796 pSReg->Sel = 0;
3797 pSReg->ValidSel = 0;
3798 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3799 {
3800 /* VT-x (Intel 3960x) doesn't change the base and limit, but clears and sets the following attributes */
3801 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3802 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3803 }
3804 else
3805 {
3806 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3807 /** @todo check this on AMD-V */
3808 pSReg->u64Base = 0;
3809 pSReg->u32Limit = 0;
3810 }
3811}
3812
3813
3814/**
3815 * Loads a segment selector during a task switch in V8086 mode.
3816 *
3817 * @param pSReg Pointer to the segment register.
3818 * @param uSel The selector value to load.
3819 */
3820IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3821{
3822 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3823 pSReg->Sel = uSel;
3824 pSReg->ValidSel = uSel;
3825 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3826 pSReg->u64Base = uSel << 4;
3827 pSReg->u32Limit = 0xffff;
3828 pSReg->Attr.u = 0xf3;
3829}
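/*
 * For reference, the 0xf3 attribute value above decodes to P=1, DPL=3, S=1
 * and type 3 (read/write data, accessed), which is what every segment looks
 * like in V8086 mode regardless of the selector value.
 */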
3830
3831
3832/**
3833 * Loads a NULL data selector into a selector register, both the hidden and
3834 * visible parts, in protected mode.
3835 *
3836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3837 * @param pSReg Pointer to the segment register.
3838 * @param uRpl The RPL.
3839 */
3840IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3841{
3842 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3843 * data selector in protected mode. */
3844 pSReg->Sel = uRpl;
3845 pSReg->ValidSel = uRpl;
3846 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3847 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3848 {
3849 /* VT-x (Intel 3960x) observed doing something like this. */
3850 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3851 pSReg->u32Limit = UINT32_MAX;
3852 pSReg->u64Base = 0;
3853 }
3854 else
3855 {
3856 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3857 pSReg->u32Limit = 0;
3858 pSReg->u64Base = 0;
3859 }
3860}
3861
3862
3863/**
3864 * Loads a segment selector during a task switch in protected mode.
3865 *
3866 * In this task switch scenario, we would throw \#TS exceptions rather than
3867 * \#GPs.
3868 *
3869 * @returns VBox strict status code.
3870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3871 * @param pSReg Pointer to the segment register.
3872 * @param uSel The new selector value.
3873 *
3874 * @remarks This does _not_ handle CS or SS.
3875 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3876 */
3877IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3878{
3879 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3880
3881 /* Null data selector. */
3882 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3883 {
3884 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3885 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3886 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3887 return VINF_SUCCESS;
3888 }
3889
3890 /* Fetch the descriptor. */
3891 IEMSELDESC Desc;
3892 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3893 if (rcStrict != VINF_SUCCESS)
3894 {
3895 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3896 VBOXSTRICTRC_VAL(rcStrict)));
3897 return rcStrict;
3898 }
3899
3900 /* Must be a data segment or readable code segment. */
3901 if ( !Desc.Legacy.Gen.u1DescType
3902 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3903 {
3904 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3905 Desc.Legacy.Gen.u4Type));
3906 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3907 }
3908
3909 /* Check privileges for data segments and non-conforming code segments. */
3910 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3911 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3912 {
3913 /* The RPL and the new CPL must be less than or equal to the DPL. */
3914 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3915 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3916 {
3917 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3918 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3919 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3920 }
3921 }
3922
3923 /* Is it there? */
3924 if (!Desc.Legacy.Gen.u1Present)
3925 {
3926 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3927 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3928 }
3929
3930 /* The base and limit. */
3931 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3932 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3933
3934 /*
3935 * Ok, everything checked out fine. Now set the accessed bit before
3936 * committing the result into the registers.
3937 */
3938 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3939 {
3940 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3941 if (rcStrict != VINF_SUCCESS)
3942 return rcStrict;
3943 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3944 }
3945
3946 /* Commit */
3947 pSReg->Sel = uSel;
3948 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3949 pSReg->u32Limit = cbLimit;
3950 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3951 pSReg->ValidSel = uSel;
3952 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3953 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3954 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3955
3956 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3957 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3958 return VINF_SUCCESS;
3959}
3960
3961
3962/**
3963 * Performs a task switch.
3964 *
3965 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3966 * caller is responsible for performing the necessary checks (like DPL, TSS
3967 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3968 * reference for JMP, CALL, IRET.
3969 *
3970 * If the task switch is due to a software interrupt or hardware exception,
3971 * the caller is responsible for validating the TSS selector and descriptor. See
3972 * Intel Instruction reference for INT n.
3973 *
3974 * @returns VBox strict status code.
3975 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3976 * @param pCtx The CPU context.
3977 * @param enmTaskSwitch What caused this task switch.
3978 * @param uNextEip The EIP effective after the task switch.
3979 * @param fFlags The flags.
3980 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3981 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3982 * @param SelTSS The TSS selector of the new task.
3983 * @param pNewDescTSS Pointer to the new TSS descriptor.
3984 */
3985IEM_STATIC VBOXSTRICTRC
3986iemTaskSwitch(PVMCPU pVCpu,
3987 PCPUMCTX pCtx,
3988 IEMTASKSWITCH enmTaskSwitch,
3989 uint32_t uNextEip,
3990 uint32_t fFlags,
3991 uint16_t uErr,
3992 uint64_t uCr2,
3993 RTSEL SelTSS,
3994 PIEMSELDESC pNewDescTSS)
3995{
3996 Assert(!IEM_IS_REAL_MODE(pVCpu));
3997 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3998
3999 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
4000 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4001 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4002 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4003 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4004
4005 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4006 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4007
4008 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
4009 fIsNewTSS386, pCtx->eip, uNextEip));
4010
4011 /* Update CR2 in case it's a page-fault. */
4012 /** @todo This should probably be done much earlier in IEM/PGM. See
4013 * @bugref{5653#c49}. */
4014 if (fFlags & IEM_XCPT_FLAGS_CR2)
4015 pCtx->cr2 = uCr2;
4016
4017 /*
4018 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
4019 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
4020 */
4021 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
4022 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
4023 if (uNewTSSLimit < uNewTSSLimitMin)
4024 {
4025 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
4026 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
4027 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4028 }
4029
4030 /*
4031 * Check the current TSS limit. The last written byte to the current TSS during the
4032 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
4033 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4034 *
4035 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4036 * end up with smaller than "legal" TSS limits.
4037 */
4038 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
4039 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4040 if (uCurTSSLimit < uCurTSSLimitMin)
4041 {
4042 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4043 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4044 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4045 }
4046
4047 /*
4048 * Verify that the new TSS can be accessed and map it. Map only the required contents
4049 * and not the entire TSS.
4050 */
4051 void *pvNewTSS;
4052 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4053 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4054 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4055 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4056 * not perform correct translation if this happens. See Intel spec. 7.2.1
4057 * "Task-State Segment" */
4058 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4059 if (rcStrict != VINF_SUCCESS)
4060 {
4061 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4062 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4063 return rcStrict;
4064 }
4065
4066 /*
4067 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4068 */
4069 uint32_t u32EFlags = pCtx->eflags.u32;
4070 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4071 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4072 {
4073 PX86DESC pDescCurTSS;
4074 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4075 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4076 if (rcStrict != VINF_SUCCESS)
4077 {
4078 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4079 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4080 return rcStrict;
4081 }
4082
4083 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4084 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4085 if (rcStrict != VINF_SUCCESS)
4086 {
4087 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4088 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4089 return rcStrict;
4090 }
4091
4092 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4093 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4094 {
4095 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4096 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4097 u32EFlags &= ~X86_EFL_NT;
4098 }
4099 }
4100
4101 /*
4102 * Save the CPU state into the current TSS.
4103 */
4104 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
4105 if (GCPtrNewTSS == GCPtrCurTSS)
4106 {
4107 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4108 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4109 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
4110 }
4111 if (fIsNewTSS386)
4112 {
4113 /*
4114 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4115 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4116 */
4117 void *pvCurTSS32;
4118 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
4119 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
4120 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4121 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4122 if (rcStrict != VINF_SUCCESS)
4123 {
4124 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4125 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4126 return rcStrict;
4127 }
4128
4129 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4130 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4131 pCurTSS32->eip = uNextEip;
4132 pCurTSS32->eflags = u32EFlags;
4133 pCurTSS32->eax = pCtx->eax;
4134 pCurTSS32->ecx = pCtx->ecx;
4135 pCurTSS32->edx = pCtx->edx;
4136 pCurTSS32->ebx = pCtx->ebx;
4137 pCurTSS32->esp = pCtx->esp;
4138 pCurTSS32->ebp = pCtx->ebp;
4139 pCurTSS32->esi = pCtx->esi;
4140 pCurTSS32->edi = pCtx->edi;
4141 pCurTSS32->es = pCtx->es.Sel;
4142 pCurTSS32->cs = pCtx->cs.Sel;
4143 pCurTSS32->ss = pCtx->ss.Sel;
4144 pCurTSS32->ds = pCtx->ds.Sel;
4145 pCurTSS32->fs = pCtx->fs.Sel;
4146 pCurTSS32->gs = pCtx->gs.Sel;
4147
4148 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4149 if (rcStrict != VINF_SUCCESS)
4150 {
4151 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4152 VBOXSTRICTRC_VAL(rcStrict)));
4153 return rcStrict;
4154 }
4155 }
4156 else
4157 {
4158 /*
4159 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4160 */
4161 void *pvCurTSS16;
4162 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
4163 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
4164 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4165 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4166 if (rcStrict != VINF_SUCCESS)
4167 {
4168 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4169 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4170 return rcStrict;
4171 }
4172
4173 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4174 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4175 pCurTSS16->ip = uNextEip;
4176 pCurTSS16->flags = u32EFlags;
4177 pCurTSS16->ax = pCtx->ax;
4178 pCurTSS16->cx = pCtx->cx;
4179 pCurTSS16->dx = pCtx->dx;
4180 pCurTSS16->bx = pCtx->bx;
4181 pCurTSS16->sp = pCtx->sp;
4182 pCurTSS16->bp = pCtx->bp;
4183 pCurTSS16->si = pCtx->si;
4184 pCurTSS16->di = pCtx->di;
4185 pCurTSS16->es = pCtx->es.Sel;
4186 pCurTSS16->cs = pCtx->cs.Sel;
4187 pCurTSS16->ss = pCtx->ss.Sel;
4188 pCurTSS16->ds = pCtx->ds.Sel;
4189
4190 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4191 if (rcStrict != VINF_SUCCESS)
4192 {
4193 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4194 VBOXSTRICTRC_VAL(rcStrict)));
4195 return rcStrict;
4196 }
4197 }
4198
4199 /*
4200 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4201 */
4202 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4203 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4204 {
4205 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4206 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4207 pNewTSS->selPrev = pCtx->tr.Sel;
4208 }
4209
4210 /*
4211 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4212 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4213 */
4214 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4215 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4216 bool fNewDebugTrap;
4217 if (fIsNewTSS386)
4218 {
4219 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4220 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4221 uNewEip = pNewTSS32->eip;
4222 uNewEflags = pNewTSS32->eflags;
4223 uNewEax = pNewTSS32->eax;
4224 uNewEcx = pNewTSS32->ecx;
4225 uNewEdx = pNewTSS32->edx;
4226 uNewEbx = pNewTSS32->ebx;
4227 uNewEsp = pNewTSS32->esp;
4228 uNewEbp = pNewTSS32->ebp;
4229 uNewEsi = pNewTSS32->esi;
4230 uNewEdi = pNewTSS32->edi;
4231 uNewES = pNewTSS32->es;
4232 uNewCS = pNewTSS32->cs;
4233 uNewSS = pNewTSS32->ss;
4234 uNewDS = pNewTSS32->ds;
4235 uNewFS = pNewTSS32->fs;
4236 uNewGS = pNewTSS32->gs;
4237 uNewLdt = pNewTSS32->selLdt;
4238 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4239 }
4240 else
4241 {
4242 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4243 uNewCr3 = 0;
4244 uNewEip = pNewTSS16->ip;
4245 uNewEflags = pNewTSS16->flags;
4246 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4247 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4248 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4249 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4250 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4251 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4252 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4253 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4254 uNewES = pNewTSS16->es;
4255 uNewCS = pNewTSS16->cs;
4256 uNewSS = pNewTSS16->ss;
4257 uNewDS = pNewTSS16->ds;
4258 uNewFS = 0;
4259 uNewGS = 0;
4260 uNewLdt = pNewTSS16->selLdt;
4261 fNewDebugTrap = false;
4262 }
4263
4264 if (GCPtrNewTSS == GCPtrCurTSS)
4265 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4266 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4267
4268 /*
4269 * We're done accessing the new TSS.
4270 */
4271 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4272 if (rcStrict != VINF_SUCCESS)
4273 {
4274 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4275 return rcStrict;
4276 }
4277
4278 /*
4279 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4280 */
4281 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4282 {
4283 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4284 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4285 if (rcStrict != VINF_SUCCESS)
4286 {
4287 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4288 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4289 return rcStrict;
4290 }
4291
4292 /* Check that the descriptor indicates the new TSS is available (not busy). */
4293 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4294 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4295 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4296
4297 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4298 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4299 if (rcStrict != VINF_SUCCESS)
4300 {
4301 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4302 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4303 return rcStrict;
4304 }
4305 }
4306
4307 /*
4308 * From this point on, we're technically in the new task. We will defer exceptions
4309 * until the completion of the task switch but before executing any instructions in the new task.
4310 */
4311 pCtx->tr.Sel = SelTSS;
4312 pCtx->tr.ValidSel = SelTSS;
4313 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
4314 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4315 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4316 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4317 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4318
4319 /* Set the busy bit in TR. */
4320 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4321 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4322 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4323 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4324 {
4325 uNewEflags |= X86_EFL_NT;
4326 }
4327
4328 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4329 pCtx->cr0 |= X86_CR0_TS;
4330 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4331
4332 pCtx->eip = uNewEip;
4333 pCtx->eax = uNewEax;
4334 pCtx->ecx = uNewEcx;
4335 pCtx->edx = uNewEdx;
4336 pCtx->ebx = uNewEbx;
4337 pCtx->esp = uNewEsp;
4338 pCtx->ebp = uNewEbp;
4339 pCtx->esi = uNewEsi;
4340 pCtx->edi = uNewEdi;
4341
4342 uNewEflags &= X86_EFL_LIVE_MASK;
4343 uNewEflags |= X86_EFL_RA1_MASK;
4344 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
4345
4346 /*
4347 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4348 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4349 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4350 */
4351 pCtx->es.Sel = uNewES;
4352 pCtx->es.Attr.u &= ~X86DESCATTR_P;
4353
4354 pCtx->cs.Sel = uNewCS;
4355 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
4356
4357 pCtx->ss.Sel = uNewSS;
4358 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
4359
4360 pCtx->ds.Sel = uNewDS;
4361 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
4362
4363 pCtx->fs.Sel = uNewFS;
4364 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
4365
4366 pCtx->gs.Sel = uNewGS;
4367 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
4368 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4369
4370 pCtx->ldtr.Sel = uNewLdt;
4371 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4372 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
4373 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4374
4375 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4376 {
4377 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
4378 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
4379 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
4380 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
4381 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
4382 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
4383 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4384 }
4385
4386 /*
4387 * Switch CR3 for the new task.
4388 */
4389 if ( fIsNewTSS386
4390 && (pCtx->cr0 & X86_CR0_PG))
4391 {
4392 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4393 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4394 {
4395 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4396 AssertRCSuccessReturn(rc, rc);
4397 }
4398 else
4399 pCtx->cr3 = uNewCr3;
4400
4401 /* Inform PGM. */
4402 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4403 {
4404 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4405 AssertRCReturn(rc, rc);
4406 /* ignore informational status codes */
4407 }
4408 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4409 }
4410
4411 /*
4412 * Switch LDTR for the new task.
4413 */
4414 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4415 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4416 else
4417 {
4418 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4419
4420 IEMSELDESC DescNewLdt;
4421 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4422 if (rcStrict != VINF_SUCCESS)
4423 {
4424 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4425 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4426 return rcStrict;
4427 }
4428 if ( !DescNewLdt.Legacy.Gen.u1Present
4429 || DescNewLdt.Legacy.Gen.u1DescType
4430 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4431 {
4432 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4433 uNewLdt, DescNewLdt.Legacy.u));
4434 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4435 }
4436
4437 pCtx->ldtr.ValidSel = uNewLdt;
4438 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4439 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4440 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4441 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4442 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4443 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4444 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4445 }
4446
4447 IEMSELDESC DescSS;
4448 if (IEM_IS_V86_MODE(pVCpu))
4449 {
4450 pVCpu->iem.s.uCpl = 3;
4451 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4452 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4453 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4454 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4455 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4456 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4457
4458 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4459 DescSS.Legacy.u = 0;
4460 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4461 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4462 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4463 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4464 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4465 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4466 DescSS.Legacy.Gen.u2Dpl = 3;
4467 }
4468 else
4469 {
4470 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4471
4472 /*
4473 * Load the stack segment for the new task.
4474 */
4475 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4476 {
4477 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4478 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4479 }
4480
4481 /* Fetch the descriptor. */
4482 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4483 if (rcStrict != VINF_SUCCESS)
4484 {
4485 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4486 VBOXSTRICTRC_VAL(rcStrict)));
4487 return rcStrict;
4488 }
4489
4490 /* SS must be a data segment and writable. */
4491 if ( !DescSS.Legacy.Gen.u1DescType
4492 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4493 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4494 {
4495 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4496 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4497 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4498 }
4499
4500 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4501 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4502 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4503 {
4504 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4505 uNewCpl));
4506 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4507 }
4508
4509 /* Is it there? */
4510 if (!DescSS.Legacy.Gen.u1Present)
4511 {
4512 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4513 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4514 }
4515
4516 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4517 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4518
4519 /* Set the accessed bit before committing the result into SS. */
4520 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4521 {
4522 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4523 if (rcStrict != VINF_SUCCESS)
4524 return rcStrict;
4525 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4526 }
4527
4528 /* Commit SS. */
4529 pCtx->ss.Sel = uNewSS;
4530 pCtx->ss.ValidSel = uNewSS;
4531 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4532 pCtx->ss.u32Limit = cbLimit;
4533 pCtx->ss.u64Base = u64Base;
4534 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4535 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4536
4537 /* CPL has changed, update IEM before loading rest of segments. */
4538 pVCpu->iem.s.uCpl = uNewCpl;
4539
4540 /*
4541 * Load the data segments for the new task.
4542 */
4543 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4544 if (rcStrict != VINF_SUCCESS)
4545 return rcStrict;
4546 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4547 if (rcStrict != VINF_SUCCESS)
4548 return rcStrict;
4549 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4550 if (rcStrict != VINF_SUCCESS)
4551 return rcStrict;
4552 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4553 if (rcStrict != VINF_SUCCESS)
4554 return rcStrict;
4555
4556 /*
4557 * Load the code segment for the new task.
4558 */
4559 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4560 {
4561 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4562 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4563 }
4564
4565 /* Fetch the descriptor. */
4566 IEMSELDESC DescCS;
4567 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4568 if (rcStrict != VINF_SUCCESS)
4569 {
4570 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4571 return rcStrict;
4572 }
4573
4574 /* CS must be a code segment. */
4575 if ( !DescCS.Legacy.Gen.u1DescType
4576 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4577 {
4578 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4579 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4580 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4581 }
4582
4583 /* For conforming CS, DPL must be less than or equal to the RPL. */
4584 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4585 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4586 {
4587 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4588 DescCS.Legacy.Gen.u2Dpl));
4589 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4590 }
4591
4592 /* For non-conforming CS, DPL must match RPL. */
4593 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4594 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4595 {
4596 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4597 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4598 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4599 }
4600
4601 /* Is it there? */
4602 if (!DescCS.Legacy.Gen.u1Present)
4603 {
4604 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4605 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4606 }
4607
4608 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4609 u64Base = X86DESC_BASE(&DescCS.Legacy);
4610
4611 /* Set the accessed bit before committing the result into CS. */
4612 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4613 {
4614 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4615 if (rcStrict != VINF_SUCCESS)
4616 return rcStrict;
4617 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4618 }
4619
4620 /* Commit CS. */
4621 pCtx->cs.Sel = uNewCS;
4622 pCtx->cs.ValidSel = uNewCS;
4623 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4624 pCtx->cs.u32Limit = cbLimit;
4625 pCtx->cs.u64Base = u64Base;
4626 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4627 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4628 }
4629
4630 /** @todo Debug trap. */
4631 if (fIsNewTSS386 && fNewDebugTrap)
4632 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4633
4634 /*
4635 * Construct the error code masks based on what caused this task switch.
4636 * See Intel Instruction reference for INT.
4637 */
4638 uint16_t uExt;
4639 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4640 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4641 {
4642 uExt = 1;
4643 }
4644 else
4645 uExt = 0;
4646
4647 /*
4648 * Push any error code on to the new stack.
4649 */
4650 if (fFlags & IEM_XCPT_FLAGS_ERR)
4651 {
4652 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4653 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4654 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4655
4656 /* Check that there is sufficient space on the stack. */
4657 /** @todo Factor out segment limit checking for normal/expand down segments
4658 * into a separate function. */
4659 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4660 {
4661 if ( pCtx->esp - 1 > cbLimitSS
4662 || pCtx->esp < cbStackFrame)
4663 {
4664 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4665 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4666 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4667 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4668 }
4669 }
4670 else
4671 {
4672 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4673 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4674 {
4675 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4676 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4677 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4678 }
4679 }
4680
4681
4682 if (fIsNewTSS386)
4683 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4684 else
4685 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4686 if (rcStrict != VINF_SUCCESS)
4687 {
4688 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4689 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4690 return rcStrict;
4691 }
4692 }
4693
4694 /* Check the new EIP against the new CS limit. */
4695 if (pCtx->eip > pCtx->cs.u32Limit)
4696 {
4697 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4698 pCtx->eip, pCtx->cs.u32Limit));
4699 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4700 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4701 }
4702
4703 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4704 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4705}
4706
4707
4708/**
4709 * Implements exceptions and interrupts for protected mode.
4710 *
4711 * @returns VBox strict status code.
4712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4713 * @param pCtx The CPU context.
4714 * @param cbInstr The number of bytes to offset rIP by in the return
4715 * address.
4716 * @param u8Vector The interrupt / exception vector number.
4717 * @param fFlags The flags.
4718 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4719 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4720 */
4721IEM_STATIC VBOXSTRICTRC
4722iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4723 PCPUMCTX pCtx,
4724 uint8_t cbInstr,
4725 uint8_t u8Vector,
4726 uint32_t fFlags,
4727 uint16_t uErr,
4728 uint64_t uCr2)
4729{
4730 /*
4731 * Read the IDT entry.
4732 */
4733 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4734 {
4735 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4736 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4737 }
4738 X86DESC Idte;
4739 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4740 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4741 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4742 return rcStrict;
4743 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4744 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4745 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4746
4747 /*
4748 * Check the descriptor type, DPL and such.
4749 * ASSUMES this is done in the same order as described for call-gate calls.
4750 */
4751 if (Idte.Gate.u1DescType)
4752 {
4753 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4754 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4755 }
4756 bool fTaskGate = false;
4757 uint8_t f32BitGate = true;
4758 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4759 switch (Idte.Gate.u4Type)
4760 {
4761 case X86_SEL_TYPE_SYS_UNDEFINED:
4762 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4763 case X86_SEL_TYPE_SYS_LDT:
4764 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4765 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4766 case X86_SEL_TYPE_SYS_UNDEFINED2:
4767 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4768 case X86_SEL_TYPE_SYS_UNDEFINED3:
4769 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4770 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4771 case X86_SEL_TYPE_SYS_UNDEFINED4:
4772 {
4773 /** @todo check what actually happens when the type is wrong...
4774 * esp. call gates. */
4775 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4776 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4777 }
4778
4779 case X86_SEL_TYPE_SYS_286_INT_GATE:
4780 f32BitGate = false;
4781 /* fall thru */
4782 case X86_SEL_TYPE_SYS_386_INT_GATE:
4783 fEflToClear |= X86_EFL_IF;
4784 break;
4785
4786 case X86_SEL_TYPE_SYS_TASK_GATE:
4787 fTaskGate = true;
4788#ifndef IEM_IMPLEMENTS_TASKSWITCH
4789 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4790#endif
4791 break;
4792
4793 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4794 f32BitGate = false; /* fall thru */
4795 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4796 break;
4797
4798 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4799 }
4800
4801 /* Check DPL against CPL if applicable. */
4802 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4803 {
4804 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4805 {
4806 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4807 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4808 }
4809 }
4810
4811 /* Is it there? */
4812 if (!Idte.Gate.u1Present)
4813 {
4814 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4815 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4816 }
4817
4818 /* Is it a task-gate? */
4819 if (fTaskGate)
4820 {
4821 /*
4822 * Construct the error code masks based on what caused this task switch.
4823 * See Intel Instruction reference for INT.
4824 */
4825 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4826 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4827 RTSEL SelTSS = Idte.Gate.u16Sel;
4828
4829 /*
4830 * Fetch the TSS descriptor in the GDT.
4831 */
4832 IEMSELDESC DescTSS;
4833 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4834 if (rcStrict != VINF_SUCCESS)
4835 {
4836 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4837 VBOXSTRICTRC_VAL(rcStrict)));
4838 return rcStrict;
4839 }
4840
4841 /* The TSS descriptor must be a system segment and be available (not busy). */
4842 if ( DescTSS.Legacy.Gen.u1DescType
4843 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4844 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4845 {
4846 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4847 u8Vector, SelTSS, DescTSS.Legacy.au64));
4848 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4849 }
4850
4851 /* The TSS must be present. */
4852 if (!DescTSS.Legacy.Gen.u1Present)
4853 {
4854 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4855 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4856 }
4857
4858 /* Do the actual task switch. */
4859 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4860 }
4861
4862 /* A null CS is bad. */
4863 RTSEL NewCS = Idte.Gate.u16Sel;
4864 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4865 {
4866 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4867 return iemRaiseGeneralProtectionFault0(pVCpu);
4868 }
4869
4870 /* Fetch the descriptor for the new CS. */
4871 IEMSELDESC DescCS;
4872 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4873 if (rcStrict != VINF_SUCCESS)
4874 {
4875 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4876 return rcStrict;
4877 }
4878
4879 /* Must be a code segment. */
4880 if (!DescCS.Legacy.Gen.u1DescType)
4881 {
4882 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4883 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4884 }
4885 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4886 {
4887 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4888 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4889 }
4890
4891 /* Don't allow lowering the privilege level. */
4892 /** @todo Does the lowering of privileges apply to software interrupts
4893 * only? This has bearings on the more-privileged or
4894 * same-privilege stack behavior further down. A testcase would
4895 * be nice. */
4896 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4897 {
4898 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4899 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4900 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4901 }
4902
4903 /* Make sure the selector is present. */
4904 if (!DescCS.Legacy.Gen.u1Present)
4905 {
4906 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4907 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4908 }
4909
4910 /* Check the new EIP against the new CS limit. */
4911 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4912 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4913 ? Idte.Gate.u16OffsetLow
4914 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4915 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4916 if (uNewEip > cbLimitCS)
4917 {
4918 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4919 u8Vector, uNewEip, cbLimitCS, NewCS));
4920 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4921 }
4922 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4923
4924 /* Calc the flag image to push. */
4925 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4926 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4927 fEfl &= ~X86_EFL_RF;
4928 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4929 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4930
4931 /* From V8086 mode only go to CPL 0. */
4932 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4933 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4934 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4935 {
4936 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4937 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4938 }
4939
4940 /*
4941 * If the privilege level changes, we need to get a new stack from the TSS.
4942 * This in turns means validating the new SS and ESP...
4943 */
4944 if (uNewCpl != pVCpu->iem.s.uCpl)
4945 {
4946 RTSEL NewSS;
4947 uint32_t uNewEsp;
4948 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4949 if (rcStrict != VINF_SUCCESS)
4950 return rcStrict;
4951
4952 IEMSELDESC DescSS;
4953 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4954 if (rcStrict != VINF_SUCCESS)
4955 return rcStrict;
4956 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4957 if (!DescSS.Legacy.Gen.u1DefBig)
4958 {
4959 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4960 uNewEsp = (uint16_t)uNewEsp;
4961 }
4962
4963 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4964
4965 /* Check that there is sufficient space for the stack frame. */
4966 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4967 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4968 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4969 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
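 /* Frame size breakdown: for a 16-bit gate the frame is SS, ESP, EFLAGS,
  CS, EIP (10 bytes) plus an optional error code (12 bytes); from V8086
  mode, GS, FS, DS and ES are pushed too (18/20 bytes). The f32BitGate
  shift doubles these sizes for 32-bit gates, matching the pushes below. */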
4970
4971 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4972 {
4973 if ( uNewEsp - 1 > cbLimitSS
4974 || uNewEsp < cbStackFrame)
4975 {
4976 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4977 u8Vector, NewSS, uNewEsp, cbStackFrame));
4978 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4979 }
4980 }
4981 else
4982 {
4983 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4984 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4985 {
4986 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4987 u8Vector, NewSS, uNewEsp, cbStackFrame));
4988 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4989 }
4990 }
4991
4992 /*
4993 * Start making changes.
4994 */
4995
4996 /* Set the new CPL so that stack accesses use it. */
4997 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4998 pVCpu->iem.s.uCpl = uNewCpl;
4999
5000 /* Create the stack frame. */
5001 RTPTRUNION uStackFrame;
5002 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5003 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5004 if (rcStrict != VINF_SUCCESS)
5005 return rcStrict;
5006 void * const pvStackFrame = uStackFrame.pv;
5007 if (f32BitGate)
5008 {
5009 if (fFlags & IEM_XCPT_FLAGS_ERR)
5010 *uStackFrame.pu32++ = uErr;
5011 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
5012 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5013 uStackFrame.pu32[2] = fEfl;
5014 uStackFrame.pu32[3] = pCtx->esp;
5015 uStackFrame.pu32[4] = pCtx->ss.Sel;
5016 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
5017 if (fEfl & X86_EFL_VM)
5018 {
5019 uStackFrame.pu32[1] = pCtx->cs.Sel;
5020 uStackFrame.pu32[5] = pCtx->es.Sel;
5021 uStackFrame.pu32[6] = pCtx->ds.Sel;
5022 uStackFrame.pu32[7] = pCtx->fs.Sel;
5023 uStackFrame.pu32[8] = pCtx->gs.Sel;
5024 }
5025 }
5026 else
5027 {
5028 if (fFlags & IEM_XCPT_FLAGS_ERR)
5029 *uStackFrame.pu16++ = uErr;
5030 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
5031 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5032 uStackFrame.pu16[2] = fEfl;
5033 uStackFrame.pu16[3] = pCtx->sp;
5034 uStackFrame.pu16[4] = pCtx->ss.Sel;
5035 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
5036 if (fEfl & X86_EFL_VM)
5037 {
5038 uStackFrame.pu16[1] = pCtx->cs.Sel;
5039 uStackFrame.pu16[5] = pCtx->es.Sel;
5040 uStackFrame.pu16[6] = pCtx->ds.Sel;
5041 uStackFrame.pu16[7] = pCtx->fs.Sel;
5042 uStackFrame.pu16[8] = pCtx->gs.Sel;
5043 }
5044 }
5045 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5046 if (rcStrict != VINF_SUCCESS)
5047 return rcStrict;
5048
5049 /* Mark the selectors 'accessed' (hope this is the correct time). */
5050 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5051 * after pushing the stack frame? (Write protect the gdt + stack to
5052 * find out.) */
5053 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5054 {
5055 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5056 if (rcStrict != VINF_SUCCESS)
5057 return rcStrict;
5058 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5059 }
5060
5061 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5062 {
5063 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5064 if (rcStrict != VINF_SUCCESS)
5065 return rcStrict;
5066 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5067 }
5068
5069 /*
5070 * Start committing the register changes (joins with the DPL=CPL branch).
5071 */
5072 pCtx->ss.Sel = NewSS;
5073 pCtx->ss.ValidSel = NewSS;
5074 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5075 pCtx->ss.u32Limit = cbLimitSS;
5076 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5077 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5078 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5079 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5080 * SP is loaded).
5081 * Need to check the other combinations too:
5082 * - 16-bit TSS, 32-bit handler
5083 * - 32-bit TSS, 16-bit handler */
5084 if (!pCtx->ss.Attr.n.u1DefBig)
5085 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
5086 else
5087 pCtx->rsp = uNewEsp - cbStackFrame;
5088
5089 if (fEfl & X86_EFL_VM)
5090 {
5091 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
5092 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
5093 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
5094 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
5095 }
5096 }
5097 /*
5098 * Same privilege, no stack change and smaller stack frame.
5099 */
5100 else
5101 {
5102 uint64_t uNewRsp;
5103 RTPTRUNION uStackFrame;
5104 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
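 /* I.e. (E)IP, CS and (E)FLAGS plus an optional error code; there is no
  stack switch here, so SS:ESP is not part of the frame. */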
5105 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5106 if (rcStrict != VINF_SUCCESS)
5107 return rcStrict;
5108 void * const pvStackFrame = uStackFrame.pv;
5109
5110 if (f32BitGate)
5111 {
5112 if (fFlags & IEM_XCPT_FLAGS_ERR)
5113 *uStackFrame.pu32++ = uErr;
5114 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5115 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5116 uStackFrame.pu32[2] = fEfl;
5117 }
5118 else
5119 {
5120 if (fFlags & IEM_XCPT_FLAGS_ERR)
5121 *uStackFrame.pu16++ = uErr;
5122 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5123 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5124 uStackFrame.pu16[2] = fEfl;
5125 }
5126 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5127 if (rcStrict != VINF_SUCCESS)
5128 return rcStrict;
5129
5130 /* Mark the CS selector as 'accessed'. */
5131 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5132 {
5133 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5134 if (rcStrict != VINF_SUCCESS)
5135 return rcStrict;
5136 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5137 }
5138
5139 /*
5140 * Start committing the register changes (joins with the other branch).
5141 */
5142 pCtx->rsp = uNewRsp;
5143 }
5144
5145 /* ... register committing continues. */
5146 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5147 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5148 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5149 pCtx->cs.u32Limit = cbLimitCS;
5150 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5151 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5152
5153 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5154 fEfl &= ~fEflToClear;
5155 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5156
5157 if (fFlags & IEM_XCPT_FLAGS_CR2)
5158 pCtx->cr2 = uCr2;
5159
5160 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5161 iemRaiseXcptAdjustState(pCtx, u8Vector);
5162
5163 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5164}
5165
5166
5167/**
5168 * Implements exceptions and interrupts for long mode.
5169 *
5170 * @returns VBox strict status code.
5171 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5172 * @param pCtx The CPU context.
5173 * @param cbInstr The number of bytes to offset rIP by in the return
5174 * address.
5175 * @param u8Vector The interrupt / exception vector number.
5176 * @param fFlags The flags.
5177 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5178 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5179 */
5180IEM_STATIC VBOXSTRICTRC
5181iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5182 PCPUMCTX pCtx,
5183 uint8_t cbInstr,
5184 uint8_t u8Vector,
5185 uint32_t fFlags,
5186 uint16_t uErr,
5187 uint64_t uCr2)
5188{
5189 /*
5190 * Read the IDT entry.
5191 */
5192 uint16_t offIdt = (uint16_t)u8Vector << 4;
5193 if (pCtx->idtr.cbIdt < offIdt + 7)
5194 {
5195 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
5196 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5197 }
5198 X86DESC64 Idte;
5199 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
5200 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5201 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
5202 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5203 return rcStrict;
5204 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5205 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5206 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5207
5208 /*
5209 * Check the descriptor type, DPL and such.
5210 * ASSUMES this is done in the same order as described for call-gate calls.
5211 */
5212 if (Idte.Gate.u1DescType)
5213 {
5214 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5215 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5216 }
5217 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5218 switch (Idte.Gate.u4Type)
5219 {
5220 case AMD64_SEL_TYPE_SYS_INT_GATE:
5221 fEflToClear |= X86_EFL_IF;
5222 break;
5223 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5224 break;
5225
5226 default:
5227 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5228 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5229 }
5230
5231 /* Check DPL against CPL if applicable. */
5232 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5233 {
5234 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5235 {
5236 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5237 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5238 }
5239 }
5240
5241 /* Is it there? */
5242 if (!Idte.Gate.u1Present)
5243 {
5244 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5245 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5246 }
5247
5248 /* A null CS is bad. */
5249 RTSEL NewCS = Idte.Gate.u16Sel;
5250 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5251 {
5252 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5253 return iemRaiseGeneralProtectionFault0(pVCpu);
5254 }
5255
5256 /* Fetch the descriptor for the new CS. */
5257 IEMSELDESC DescCS;
5258 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5259 if (rcStrict != VINF_SUCCESS)
5260 {
5261 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5262 return rcStrict;
5263 }
5264
5265 /* Must be a 64-bit code segment. */
5266 if (!DescCS.Long.Gen.u1DescType)
5267 {
5268 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5269 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5270 }
5271 if ( !DescCS.Long.Gen.u1Long
5272 || DescCS.Long.Gen.u1DefBig
5273 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5274 {
5275 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5276 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5277 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5278 }
5279
5280 /* Don't allow lowering the privilege level. For non-conforming CS
5281 selectors, the CS.DPL sets the privilege level the trap/interrupt
5282 handler runs at. For conforming CS selectors, the CPL remains
5283 unchanged, but the CS.DPL must be <= CPL. */
5284 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5285 * when CPU in Ring-0. Result \#GP? */
5286 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5287 {
5288 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5289 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5290 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5291 }
5292
5293
5294 /* Make sure the selector is present. */
5295 if (!DescCS.Legacy.Gen.u1Present)
5296 {
5297 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5298 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5299 }
5300
5301 /* Check that the new RIP is canonical. */
5302 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5303 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5304 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5305 if (!IEM_IS_CANONICAL(uNewRip))
5306 {
5307 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5308 return iemRaiseGeneralProtectionFault0(pVCpu);
5309 }
5310
5311 /*
5312 * If the privilege level changes or if the IST isn't zero, we need to get
5313 * a new stack from the TSS.
5314 */
5315 uint64_t uNewRsp;
5316 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5317 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5318 if ( uNewCpl != pVCpu->iem.s.uCpl
5319 || Idte.Gate.u3IST != 0)
5320 {
5321 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5322 if (rcStrict != VINF_SUCCESS)
5323 return rcStrict;
5324 }
5325 else
5326 uNewRsp = pCtx->rsp;
5327 uNewRsp &= ~(uint64_t)0xf;
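 /* In 64-bit mode the stack pointer is aligned on a 16 byte boundary
  before the frame is pushed, regardless of whether an IST or a CPL
  change supplied a new RSP, hence the unconditional masking above. */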
5328
5329 /*
5330 * Calc the flag image to push.
5331 */
5332 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
5333 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5334 fEfl &= ~X86_EFL_RF;
5335 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
5336 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5337
5338 /*
5339 * Start making changes.
5340 */
5341 /* Set the new CPL so that stack accesses use it. */
5342 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5343 pVCpu->iem.s.uCpl = uNewCpl;
5344
5345 /* Create the stack frame. */
5346 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
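 /* The 64-bit frame is always SS, RSP, RFLAGS, CS and RIP (5 qwords); the
  error code, when present, is pushed last and thus ends up at the lowest
  address (the new RSP). */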
5347 RTPTRUNION uStackFrame;
5348 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5349 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5350 if (rcStrict != VINF_SUCCESS)
5351 return rcStrict;
5352 void * const pvStackFrame = uStackFrame.pv;
5353
5354 if (fFlags & IEM_XCPT_FLAGS_ERR)
5355 *uStackFrame.pu64++ = uErr;
5356 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
5357 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5358 uStackFrame.pu64[2] = fEfl;
5359 uStackFrame.pu64[3] = pCtx->rsp;
5360 uStackFrame.pu64[4] = pCtx->ss.Sel;
5361 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5362 if (rcStrict != VINF_SUCCESS)
5363 return rcStrict;
5364
5365 /* Mark the CS selector as 'accessed' (hope this is the correct time). */
5366 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5367 * after pushing the stack frame? (Write protect the gdt + stack to
5368 * find out.) */
5369 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5370 {
5371 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5372 if (rcStrict != VINF_SUCCESS)
5373 return rcStrict;
5374 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5375 }
5376
5377 /*
5378 * Start committing the register changes.
5379 */
5380 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5381 * hidden registers when interrupting 32-bit or 16-bit code! */
5382 if (uNewCpl != uOldCpl)
5383 {
5384 pCtx->ss.Sel = 0 | uNewCpl;
5385 pCtx->ss.ValidSel = 0 | uNewCpl;
5386 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5387 pCtx->ss.u32Limit = UINT32_MAX;
5388 pCtx->ss.u64Base = 0;
5389 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5390 }
5391 pCtx->rsp = uNewRsp - cbStackFrame;
5392 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5393 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5394 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5395 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5396 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5397 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5398 pCtx->rip = uNewRip;
5399
5400 fEfl &= ~fEflToClear;
5401 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5402
5403 if (fFlags & IEM_XCPT_FLAGS_CR2)
5404 pCtx->cr2 = uCr2;
5405
5406 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5407 iemRaiseXcptAdjustState(pCtx, u8Vector);
5408
5409 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5410}
5411
5412
5413/**
5414 * Implements exceptions and interrupts.
5415 *
5416 * All exceptions and interrupts go through this function!
5417 *
5418 * @returns VBox strict status code.
5419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5420 * @param cbInstr The number of bytes to offset rIP by in the return
5421 * address.
5422 * @param u8Vector The interrupt / exception vector number.
5423 * @param fFlags The flags.
5424 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5425 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5426 */
5427DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5428iemRaiseXcptOrInt(PVMCPU pVCpu,
5429 uint8_t cbInstr,
5430 uint8_t u8Vector,
5431 uint32_t fFlags,
5432 uint16_t uErr,
5433 uint64_t uCr2)
5434{
5435 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5436#ifdef IN_RING0
5437 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5438 AssertRCReturn(rc, rc);
5439#endif
5440
5441#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5442 /*
5443 * Flush prefetch buffer
5444 */
5445 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5446#endif
5447
5448 /*
5449 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5450 */
5451 if ( pCtx->eflags.Bits.u1VM
5452 && pCtx->eflags.Bits.u2IOPL != 3
5453 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5454 && (pCtx->cr0 & X86_CR0_PE) )
5455 {
5456 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5457 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5458 u8Vector = X86_XCPT_GP;
5459 uErr = 0;
5460 }
5461#ifdef DBGFTRACE_ENABLED
5462 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5463 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5464 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5465#endif
5466
5467#ifdef VBOX_WITH_NESTED_HWVIRT
5468 if (IEM_IS_SVM_ENABLED(pVCpu))
5469 {
5470 /*
5471 * If the event is being injected as part of VMRUN, it isn't subject to event
5472 * intercepts in the nested-guest. However, secondary exceptions that occur
5473 * during injection of any event -are- subject to exception intercepts.
5474 * See AMD spec. 15.20 "Event Injection".
5475 */
5476 if (!pCtx->hwvirt.svm.fInterceptEvents)
5477 pCtx->hwvirt.svm.fInterceptEvents = 1;
5478 else
5479 {
5480 /*
5481 * Check and handle if the event being raised is intercepted.
5482 */
5483 VBOXSTRICTRC rcStrict0 = iemHandleSvmNstGstEventIntercept(pVCpu, pCtx, u8Vector, fFlags, uErr, uCr2);
5484 if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
5485 return rcStrict0;
5486 }
5487 }
5488#endif /* VBOX_WITH_NESTED_HWVIRT */
5489
5490 /*
5491 * Do recursion accounting.
5492 */
5493 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5494 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5495 if (pVCpu->iem.s.cXcptRecursions == 0)
5496 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5497 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5498 else
5499 {
5500 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5501 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5502 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5503
5504 if (pVCpu->iem.s.cXcptRecursions >= 3)
5505 {
5506#ifdef DEBUG_bird
5507 AssertFailed();
5508#endif
5509 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5510 }
5511
5512 /*
5513 * Evaluate the sequence of recurring events.
5514 */
5515 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5516 NULL /* pXcptRaiseInfo */);
5517 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5518 { /* likely */ }
5519 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5520 {
5521 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5522 u8Vector = X86_XCPT_DF;
5523 uErr = 0;
5524 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5525 if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5526 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + X86_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5527 }
5528 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5529 {
5530 Log2(("iemRaiseXcptOrInt: raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5531 return iemInitiateCpuShutdown(pVCpu);
5532 }
5533 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5534 {
5535 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5536 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5537 if (!CPUMIsGuestInNestedHwVirtMode(pCtx))
5538 return VERR_EM_GUEST_CPU_HANG;
5539 }
5540 else
5541 {
5542 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5543 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5544 return VERR_IEM_IPE_9;
5545 }
5546
5547 /*
5548 * The 'EXT' bit is set when an exception occurs during delivery of an external
5549 * event (such as an interrupt or an earlier exception)[1]. The privileged software
5550 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by the
5551 * software interrupt instructions INT n, INTO and INT3, the 'EXT' bit is not set[3].
5552 *
5553 * [1] - Intel spec. 6.13 "Error Code"
5554 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5555 * [3] - Intel Instruction reference for INT n.
5556 */
5557 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5558 && (fFlags & IEM_XCPT_FLAGS_ERR)
5559 && u8Vector != X86_XCPT_PF
5560 && u8Vector != X86_XCPT_DF)
5561 {
5562 uErr |= X86_TRAP_ERR_EXTERNAL;
5563 }
5564 }
5565
5566 pVCpu->iem.s.cXcptRecursions++;
5567 pVCpu->iem.s.uCurXcpt = u8Vector;
5568 pVCpu->iem.s.fCurXcpt = fFlags;
5569 pVCpu->iem.s.uCurXcptErr = uErr;
5570 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5571
5572 /*
5573 * Extensive logging.
5574 */
5575#if defined(LOG_ENABLED) && defined(IN_RING3)
5576 if (LogIs3Enabled())
5577 {
5578 PVM pVM = pVCpu->CTX_SUFF(pVM);
5579 char szRegs[4096];
5580 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5581 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5582 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5583 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5584 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5585 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5586 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5587 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5588 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5589 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5590 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5591 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5592 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5593 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5594 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5595 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5596 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5597 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5598 " efer=%016VR{efer}\n"
5599 " pat=%016VR{pat}\n"
5600 " sf_mask=%016VR{sf_mask}\n"
5601 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5602 " lstar=%016VR{lstar}\n"
5603 " star=%016VR{star} cstar=%016VR{cstar}\n"
5604 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5605 );
5606
5607 char szInstr[256];
5608 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5609 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5610 szInstr, sizeof(szInstr), NULL);
5611 Log3(("%s%s\n", szRegs, szInstr));
5612 }
5613#endif /* LOG_ENABLED */
5614
5615 /*
5616 * Call the mode specific worker function.
5617 */
5618 VBOXSTRICTRC rcStrict;
5619 if (!(pCtx->cr0 & X86_CR0_PE))
5620 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5621 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5622 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5623 else
5624 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5625
5626 /* Flush the prefetch buffer. */
5627#ifdef IEM_WITH_CODE_TLB
5628 pVCpu->iem.s.pbInstrBuf = NULL;
5629#else
5630 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5631#endif
5632
5633 /*
5634 * Unwind.
5635 */
5636 pVCpu->iem.s.cXcptRecursions--;
5637 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5638 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5639 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5640 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5641 return rcStrict;
5642}
5643
5644#ifdef IEM_WITH_SETJMP
5645/**
5646 * See iemRaiseXcptOrInt. Will not return.
5647 */
5648IEM_STATIC DECL_NO_RETURN(void)
5649iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5650 uint8_t cbInstr,
5651 uint8_t u8Vector,
5652 uint32_t fFlags,
5653 uint16_t uErr,
5654 uint64_t uCr2)
5655{
5656 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5657 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5658}
5659#endif
5660
5661
5662/** \#DE - 00. */
5663DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5664{
5665 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5666}
5667
5668
5669/** \#DB - 01.
5670 * @note This automatically clears DR7.GD. */
5671DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5672{
5673 /** @todo set/clear RF. */
5674 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5675 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5676}
5677
5678
5679/** \#BR - 05. */
5680DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5681{
5682 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5683}
5684
5685
5686/** \#UD - 06. */
5687DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5688{
5689 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5690}
5691
5692
5693/** \#NM - 07. */
5694DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5695{
5696 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5697}
5698
5699
5700/** \#TS(err) - 0a. */
5701DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5702{
5703 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5704}
5705
5706
5707/** \#TS(tr) - 0a. */
5708DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5709{
5710 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5711 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5712}
5713
5714
5715/** \#TS(0) - 0a. */
5716DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5717{
5718 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5719 0, 0);
5720}
5721
5722
5723/** \#TS(sel) - 0a. */
5724DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5725{
5726 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5727 uSel & X86_SEL_MASK_OFF_RPL, 0);
5728}
5729
5730
5731/** \#NP(err) - 0b. */
5732DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5733{
5734 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5735}
5736
5737
5738/** \#NP(sel) - 0b. */
5739DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5740{
5741 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5742 uSel & ~X86_SEL_RPL, 0);
5743}
5744
5745
5746/** \#SS(seg) - 0c. */
5747DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5748{
5749 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5750 uSel & ~X86_SEL_RPL, 0);
5751}
5752
5753
5754/** \#SS(err) - 0c. */
5755DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5756{
5757 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5758}
5759
5760
5761/** \#GP(n) - 0d. */
5762DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5763{
5764 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5765}
5766
5767
5768/** \#GP(0) - 0d. */
5769DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5770{
5771 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5772}
5773
5774#ifdef IEM_WITH_SETJMP
5775/** \#GP(0) - 0d. */
5776DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5777{
5778 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5779}
5780#endif
5781
5782
5783/** \#GP(sel) - 0d. */
5784DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5785{
5786 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5787 Sel & ~X86_SEL_RPL, 0);
5788}
5789
5790
5791/** \#GP(0) - 0d. */
5792DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5793{
5794 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5795}
5796
5797
5798/** \#GP(sel) - 0d. */
5799DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5800{
5801 NOREF(iSegReg); NOREF(fAccess);
5802 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5803 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5804}
5805
5806#ifdef IEM_WITH_SETJMP
5807/** \#GP(sel) - 0d, longjmp. */
5808DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5809{
5810 NOREF(iSegReg); NOREF(fAccess);
5811 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5812 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5813}
5814#endif
5815
5816/** \#GP(sel) - 0d. */
5817DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5818{
5819 NOREF(Sel);
5820 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5821}
5822
5823#ifdef IEM_WITH_SETJMP
5824/** \#GP(sel) - 0d, longjmp. */
5825DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5826{
5827 NOREF(Sel);
5828 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5829}
5830#endif
5831
5832
5833/** \#GP(sel) - 0d. */
5834DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5835{
5836 NOREF(iSegReg); NOREF(fAccess);
5837 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5838}
5839
5840#ifdef IEM_WITH_SETJMP
5841/** \#GP(sel) - 0d, longjmp. */
5842DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5843 uint32_t fAccess)
5844{
5845 NOREF(iSegReg); NOREF(fAccess);
5846 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5847}
5848#endif
5849
5850
5851/** \#PF(n) - 0e. */
5852DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5853{
5854 uint16_t uErr;
5855 switch (rc)
5856 {
5857 case VERR_PAGE_NOT_PRESENT:
5858 case VERR_PAGE_TABLE_NOT_PRESENT:
5859 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5860 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5861 uErr = 0;
5862 break;
5863
5864 default:
5865 AssertMsgFailed(("%Rrc\n", rc));
5866 /* fall thru */
5867 case VERR_ACCESS_DENIED:
5868 uErr = X86_TRAP_PF_P;
5869 break;
5870
5871 /** @todo reserved */
5872 }
5873
5874 if (pVCpu->iem.s.uCpl == 3)
5875 uErr |= X86_TRAP_PF_US;
5876
5877 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5878 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5879 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5880 uErr |= X86_TRAP_PF_ID;
5881
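 /* At this point uErr carries the P bit (protection violation vs. not
  present), the US bit for ring-3 accesses and the ID bit for instruction
  fetches with PAE+NX enabled; the RW bit for write accesses is added
  below. */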
5882#if 0 /* This is so much nonsense, really. Why was it done like that? */
5883 /* Note! RW access callers reporting a WRITE protection fault will clear
5884 the READ flag before calling. So, read-modify-write accesses (RW)
5885 can safely be reported as READ faults. */
5886 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5887 uErr |= X86_TRAP_PF_RW;
5888#else
5889 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5890 {
5891 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5892 uErr |= X86_TRAP_PF_RW;
5893 }
5894#endif
5895
5896 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5897 uErr, GCPtrWhere);
5898}
5899
5900#ifdef IEM_WITH_SETJMP
5901/** \#PF(n) - 0e, longjmp. */
5902IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5903{
5904 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5905}
5906#endif
5907
5908
5909/** \#MF(0) - 10. */
5910DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5911{
5912 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5913}
5914
5915
5916/** \#AC(0) - 11. */
5917DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5918{
5919 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5920}
5921
5922
5923/**
5924 * Macro for calling iemCImplRaiseDivideError().
5925 *
5926 * This enables us to add/remove arguments and force different levels of
5927 * inlining as we wish.
5928 *
5929 * @return Strict VBox status code.
5930 */
5931#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5932IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5933{
5934 NOREF(cbInstr);
5935 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5936}
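/*
 * Usage sketch (the opcode function name is made up for illustration): a
 * decoder path that must raise \#DE simply defers to the C implementation
 * above via the macro:
 *
 *     FNIEMOP_DEF(iemOp_hypotheticalDivideErrorPath)
 *     {
 *         return IEMOP_RAISE_DIVIDE_ERROR();
 *     }
 */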
5937
5938
5939/**
5940 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5941 *
5942 * This enables us to add/remove arguments and force different levels of
5943 * inlining as we wish.
5944 *
5945 * @return Strict VBox status code.
5946 */
5947#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5948IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5949{
5950 NOREF(cbInstr);
5951 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5952}
5953
5954
5955/**
5956 * Macro for calling iemCImplRaiseInvalidOpcode().
5957 *
5958 * This enables us to add/remove arguments and force different levels of
5959 * inlining as we wish.
5960 *
5961 * @return Strict VBox status code.
5962 */
5963#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5964IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5965{
5966 NOREF(cbInstr);
5967 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5968}
5969
5970
5971/** @} */
5972
5973
5974/*
5975 *
5976 * Helper routines.
5977 * Helper routines.
5978 * Helper routines.
5979 *
5980 */
5981
5982/**
5983 * Recalculates the effective operand size.
5984 *
5985 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5986 */
5987IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5988{
5989 switch (pVCpu->iem.s.enmCpuMode)
5990 {
5991 case IEMMODE_16BIT:
5992 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5993 break;
5994 case IEMMODE_32BIT:
5995 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5996 break;
5997 case IEMMODE_64BIT:
5998 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5999 {
6000 case 0:
6001 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6002 break;
6003 case IEM_OP_PRF_SIZE_OP:
6004 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6005 break;
6006 case IEM_OP_PRF_SIZE_REX_W:
6007 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6008 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6009 break;
6010 }
6011 break;
6012 default:
6013 AssertFailed();
6014 }
6015}
6016
6017
6018/**
6019 * Sets the default operand size to 64-bit and recalculates the effective
6020 * operand size.
6021 *
6022 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6023 */
6024IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
6025{
6026 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6027 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6028 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6029 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6030 else
6031 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6032}
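/*
 * Summary of 64-bit mode effective operand size selection as implemented
 * by iemRecalEffOpSize and iemRecalEffOpSize64Default above (REX.W always
 * wins over the 0x66 operand size prefix):
 *
 *      prefixes        32-bit default      64-bit default
 *      none            32-bit              64-bit
 *      0x66            16-bit              16-bit
 *      REX.W           64-bit              64-bit
 *      0x66 + REX.W    64-bit              64-bit
 */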
6033
6034
6035/*
6036 *
6037 * Common opcode decoders.
6038 * Common opcode decoders.
6039 * Common opcode decoders.
6040 *
6041 */
6042//#include <iprt/mem.h>
6043
6044/**
6045 * Used to add extra details about a stub case.
6046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6047 */
6048IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6049{
6050#if defined(LOG_ENABLED) && defined(IN_RING3)
6051 PVM pVM = pVCpu->CTX_SUFF(pVM);
6052 char szRegs[4096];
6053 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6054 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6055 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6056 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6057 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6058 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6059 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6060 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6061 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6062 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6063 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6064 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6065 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6066 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6067 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6068 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6069 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6070 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6071 " efer=%016VR{efer}\n"
6072 " pat=%016VR{pat}\n"
6073 " sf_mask=%016VR{sf_mask}\n"
6074 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6075 " lstar=%016VR{lstar}\n"
6076 " star=%016VR{star} cstar=%016VR{cstar}\n"
6077 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6078 );
6079
6080 char szInstr[256];
6081 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6082 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6083 szInstr, sizeof(szInstr), NULL);
6084
6085 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6086#else
6087 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip);
6088#endif
6089}
6090
6091/**
6092 * Complains about a stub.
6093 *
6094 * Providing two versions of this macro, one for daily use and one for use when
6095 * working on IEM.
6096 */
6097#if 0
6098# define IEMOP_BITCH_ABOUT_STUB() \
6099 do { \
6100 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6101 iemOpStubMsg2(pVCpu); \
6102 RTAssertPanic(); \
6103 } while (0)
6104#else
6105# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6106#endif
6107
6108/** Stubs an opcode. */
6109#define FNIEMOP_STUB(a_Name) \
6110 FNIEMOP_DEF(a_Name) \
6111 { \
6112 RT_NOREF_PV(pVCpu); \
6113 IEMOP_BITCH_ABOUT_STUB(); \
6114 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6115 } \
6116 typedef int ignore_semicolon
6117
6118/** Stubs an opcode. */
6119#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6120 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6121 { \
6122 RT_NOREF_PV(pVCpu); \
6123 RT_NOREF_PV(a_Name0); \
6124 IEMOP_BITCH_ABOUT_STUB(); \
6125 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6126 } \
6127 typedef int ignore_semicolon
6128
6129/** Stubs an opcode which currently should raise \#UD. */
6130#define FNIEMOP_UD_STUB(a_Name) \
6131 FNIEMOP_DEF(a_Name) \
6132 { \
6133 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6134 return IEMOP_RAISE_INVALID_OPCODE(); \
6135 } \
6136 typedef int ignore_semicolon
6137
6138/** Stubs an opcode which currently should raise \#UD. */
6139#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6140 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6141 { \
6142 RT_NOREF_PV(pVCpu); \
6143 RT_NOREF_PV(a_Name0); \
6144 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6145 return IEMOP_RAISE_INVALID_OPCODE(); \
6146 } \
6147 typedef int ignore_semicolon
6148
6149
6150
6151/** @name Register Access.
6152 * @{
6153 */
6154
6155/**
6156 * Gets a reference (pointer) to the specified hidden segment register.
6157 *
6158 * @returns Hidden register reference.
6159 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6160 * @param iSegReg The segment register.
6161 */
6162IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6163{
6164 Assert(iSegReg < X86_SREG_COUNT);
6165 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6166 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
6167
6168#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6169 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6170 { /* likely */ }
6171 else
6172 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6173#else
6174 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6175#endif
6176 return pSReg;
6177}
6178
6179
6180/**
6181 * Ensures that the given hidden segment register is up to date.
6182 *
6183 * @returns Hidden register reference.
6184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6185 * @param pSReg The segment register.
6186 */
6187IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6188{
6189#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6190 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6191 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6192#else
6193 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6194 NOREF(pVCpu);
6195#endif
6196 return pSReg;
6197}
6198
6199
6200/**
6201 * Gets a reference (pointer) to the specified segment register (the selector
6202 * value).
6203 *
6204 * @returns Pointer to the selector variable.
6205 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6206 * @param iSegReg The segment register.
6207 */
6208DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6209{
6210 Assert(iSegReg < X86_SREG_COUNT);
6211 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6212 return &pCtx->aSRegs[iSegReg].Sel;
6213}
6214
6215
6216/**
6217 * Fetches the selector value of a segment register.
6218 *
6219 * @returns The selector value.
6220 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6221 * @param iSegReg The segment register.
6222 */
6223DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6224{
6225 Assert(iSegReg < X86_SREG_COUNT);
6226 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
6227}
6228
6229
6230/**
6231 * Gets a reference (pointer) to the specified general purpose register.
6232 *
6233 * @returns Register reference.
6234 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6235 * @param iReg The general purpose register.
6236 */
6237DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6238{
6239 Assert(iReg < 16);
6240 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6241 return &pCtx->aGRegs[iReg];
6242}
6243
6244
6245/**
6246 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6247 *
6248 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6249 *
6250 * @returns Register reference.
6251 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6252 * @param iReg The register.
6253 */
6254DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6255{
6256 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
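    /* With a REX prefix indexes 4-7 select SPL, BPL, SIL and DIL; without one
       they select the legacy high-byte registers AH, CH, DH and BH. */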
6257 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6258 {
6259 Assert(iReg < 16);
6260 return &pCtx->aGRegs[iReg].u8;
6261 }
6262 /* high 8-bit register. */
6263 Assert(iReg < 8);
6264 return &pCtx->aGRegs[iReg & 3].bHi;
6265}
6266
6267
6268/**
6269 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6270 *
6271 * @returns Register reference.
6272 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6273 * @param iReg The register.
6274 */
6275DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6276{
6277 Assert(iReg < 16);
6278 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6279 return &pCtx->aGRegs[iReg].u16;
6280}
6281
6282
6283/**
6284 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6285 *
6286 * @returns Register reference.
6287 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6288 * @param iReg The register.
6289 */
6290DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6291{
6292 Assert(iReg < 16);
6293 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6294 return &pCtx->aGRegs[iReg].u32;
6295}
6296
6297
6298/**
6299 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6300 *
6301 * @returns Register reference.
6302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6303 * @param iReg The register.
6304 */
6305DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6306{
6307    Assert(iReg < 16);
6308 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6309 return &pCtx->aGRegs[iReg].u64;
6310}
6311
6312
6313/**
6314 * Fetches the value of an 8-bit general purpose register.
6315 *
6316 * @returns The register value.
6317 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6318 * @param iReg The register.
6319 */
6320DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6321{
6322 return *iemGRegRefU8(pVCpu, iReg);
6323}
6324
6325
6326/**
6327 * Fetches the value of a 16-bit general purpose register.
6328 *
6329 * @returns The register value.
6330 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6331 * @param iReg The register.
6332 */
6333DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6334{
6335 Assert(iReg < 16);
6336 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
6337}
6338
6339
6340/**
6341 * Fetches the value of a 32-bit general purpose register.
6342 *
6343 * @returns The register value.
6344 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6345 * @param iReg The register.
6346 */
6347DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6348{
6349 Assert(iReg < 16);
6350 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
6351}
6352
6353
6354/**
6355 * Fetches the value of a 64-bit general purpose register.
6356 *
6357 * @returns The register value.
6358 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6359 * @param iReg The register.
6360 */
6361DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6362{
6363 Assert(iReg < 16);
6364 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
6365}
6366
6367
6368/**
6369 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6370 *
6371 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6372 * segment limit.
6373 *
6374 *
 * @returns Strict VBox status code.
6375 * @param offNextInstr The offset of the next instruction.
6376 */
6377IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6378{
6379 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6380 switch (pVCpu->iem.s.enmEffOpSize)
6381 {
6382 case IEMMODE_16BIT:
6383 {
6384 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6385 if ( uNewIp > pCtx->cs.u32Limit
6386 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6387 return iemRaiseGeneralProtectionFault0(pVCpu);
6388 pCtx->rip = uNewIp;
6389 break;
6390 }
6391
6392 case IEMMODE_32BIT:
6393 {
6394 Assert(pCtx->rip <= UINT32_MAX);
6395 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6396
6397 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6398 if (uNewEip > pCtx->cs.u32Limit)
6399 return iemRaiseGeneralProtectionFault0(pVCpu);
6400 pCtx->rip = uNewEip;
6401 break;
6402 }
6403
6404 case IEMMODE_64BIT:
6405 {
6406 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6407
6408 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6409 if (!IEM_IS_CANONICAL(uNewRip))
6410 return iemRaiseGeneralProtectionFault0(pVCpu);
6411 pCtx->rip = uNewRip;
6412 break;
6413 }
6414
6415 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6416 }
6417
6418 pCtx->eflags.Bits.u1RF = 0;
6419
6420#ifndef IEM_WITH_CODE_TLB
6421 /* Flush the prefetch buffer. */
6422 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6423#endif
6424
6425 return VINF_SUCCESS;
6426}
6427
6428
6429/**
6430 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6431 *
6432 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6433 * segment limit.
6434 *
6435 * @returns Strict VBox status code.
6436 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6437 * @param offNextInstr The offset of the next instruction.
6438 */
6439IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6440{
6441 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6442 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6443
6444 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6445 if ( uNewIp > pCtx->cs.u32Limit
6446 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6447 return iemRaiseGeneralProtectionFault0(pVCpu);
6448 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6449 pCtx->rip = uNewIp;
6450 pCtx->eflags.Bits.u1RF = 0;
6451
6452#ifndef IEM_WITH_CODE_TLB
6453 /* Flush the prefetch buffer. */
6454 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6455#endif
6456
6457 return VINF_SUCCESS;
6458}
6459
6460
6461/**
6462 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6463 *
6464 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6465 * segment limit.
6466 *
6467 * @returns Strict VBox status code.
6468 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6469 * @param offNextInstr The offset of the next instruction.
6470 */
6471IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6472{
6473 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6474 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6475
6476 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6477 {
6478 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6479
6480 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6481 if (uNewEip > pCtx->cs.u32Limit)
6482 return iemRaiseGeneralProtectionFault0(pVCpu);
6483 pCtx->rip = uNewEip;
6484 }
6485 else
6486 {
6487 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6488
6489 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6490 if (!IEM_IS_CANONICAL(uNewRip))
6491 return iemRaiseGeneralProtectionFault0(pVCpu);
6492 pCtx->rip = uNewRip;
6493 }
6494 pCtx->eflags.Bits.u1RF = 0;
6495
6496#ifndef IEM_WITH_CODE_TLB
6497 /* Flush the prefetch buffer. */
6498 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6499#endif
6500
6501 return VINF_SUCCESS;
6502}
6503
6504
6505/**
6506 * Performs a near jump to the specified address.
6507 *
6508 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6509 * segment limit.
6510 *
 * @returns Strict VBox status code.
6511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6512 * @param uNewRip The new RIP value.
6513 */
6514IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6515{
6516 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6517 switch (pVCpu->iem.s.enmEffOpSize)
6518 {
6519 case IEMMODE_16BIT:
6520 {
6521 Assert(uNewRip <= UINT16_MAX);
6522 if ( uNewRip > pCtx->cs.u32Limit
6523 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6524 return iemRaiseGeneralProtectionFault0(pVCpu);
6525 /** @todo Test 16-bit jump in 64-bit mode. */
6526 pCtx->rip = uNewRip;
6527 break;
6528 }
6529
6530 case IEMMODE_32BIT:
6531 {
6532 Assert(uNewRip <= UINT32_MAX);
6533 Assert(pCtx->rip <= UINT32_MAX);
6534 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6535
6536 if (uNewRip > pCtx->cs.u32Limit)
6537 return iemRaiseGeneralProtectionFault0(pVCpu);
6538 pCtx->rip = uNewRip;
6539 break;
6540 }
6541
6542 case IEMMODE_64BIT:
6543 {
6544 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6545
6546 if (!IEM_IS_CANONICAL(uNewRip))
6547 return iemRaiseGeneralProtectionFault0(pVCpu);
6548 pCtx->rip = uNewRip;
6549 break;
6550 }
6551
6552 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6553 }
6554
6555 pCtx->eflags.Bits.u1RF = 0;
6556
6557#ifndef IEM_WITH_CODE_TLB
6558 /* Flush the prefetch buffer. */
6559 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6560#endif
6561
6562 return VINF_SUCCESS;
6563}
6564
6565
6566/**
6567 * Get the address of the top of the stack.
6568 *
6569 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6570 * @param pCtx The CPU context which SP/ESP/RSP should be
6571 * read.
6572 */
6573DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6574{
6575 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6576 return pCtx->rsp;
6577 if (pCtx->ss.Attr.n.u1DefBig)
6578 return pCtx->esp;
6579 return pCtx->sp;
6580}
6581
6582
6583/**
6584 * Updates the RIP/EIP/IP to point to the next instruction.
6585 *
6586 * This function leaves the EFLAGS.RF flag alone.
6587 *
6588 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6589 * @param cbInstr The number of bytes to add.
6590 */
6591IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6592{
6593 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6594 switch (pVCpu->iem.s.enmCpuMode)
6595 {
6596 case IEMMODE_16BIT:
6597 Assert(pCtx->rip <= UINT16_MAX);
6598 pCtx->eip += cbInstr;
6599 pCtx->eip &= UINT32_C(0xffff);
6600 break;
6601
6602 case IEMMODE_32BIT:
6603 pCtx->eip += cbInstr;
6604 Assert(pCtx->rip <= UINT32_MAX);
6605 break;
6606
6607 case IEMMODE_64BIT:
6608 pCtx->rip += cbInstr;
6609 break;
6610 default: AssertFailed();
6611 }
6612}
6613
6614
6615#if 0
6616/**
6617 * Updates the RIP/EIP/IP to point to the next instruction.
6618 *
6619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6620 */
6621IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6622{
6623 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6624}
6625#endif
6626
6627
6628
6629/**
6630 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6631 *
6632 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6633 * @param cbInstr The number of bytes to add.
6634 */
6635IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6636{
6637 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6638
6639 pCtx->eflags.Bits.u1RF = 0;
6640
6641 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
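    /* The compile-time check above lets enmCpuMode index the IP/EIP/RIP mask tables below directly. */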
6642#if ARCH_BITS >= 64
6643 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
6644 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6645 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6646#else
6647 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6648 pCtx->rip += cbInstr;
6649 else
6650 {
6651 static uint32_t const s_aEipMasks[] = { UINT32_C(0xffff), UINT32_MAX };
6652 pCtx->eip = (pCtx->eip + cbInstr) & s_aEipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6653 }
6654#endif
6655}
6656
6657
6658/**
6659 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6660 *
6661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6662 */
6663IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6664{
6665 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6666}
6667
6668
6669/**
6670 * Adds to the stack pointer.
6671 *
6672 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6673 * @param pCtx The CPU context which SP/ESP/RSP should be
6674 * updated.
6675 * @param cbToAdd The number of bytes to add (8-bit!).
6676 */
6677DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6678{
6679 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6680 pCtx->rsp += cbToAdd;
6681 else if (pCtx->ss.Attr.n.u1DefBig)
6682 pCtx->esp += cbToAdd;
6683 else
6684 pCtx->sp += cbToAdd;
6685}
6686
6687
6688/**
6689 * Subtracts from the stack pointer.
6690 *
6691 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6692 * @param pCtx The CPU context which SP/ESP/RSP should be
6693 * updated.
6694 * @param cbToSub The number of bytes to subtract (8-bit!).
6695 */
6696DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6697{
6698 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6699 pCtx->rsp -= cbToSub;
6700 else if (pCtx->ss.Attr.n.u1DefBig)
6701 pCtx->esp -= cbToSub;
6702 else
6703 pCtx->sp -= cbToSub;
6704}
6705
6706
6707/**
6708 * Adds to the temporary stack pointer.
6709 *
6710 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6711 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6712 * @param cbToAdd The number of bytes to add (16-bit).
6713 * @param pCtx Where to get the current stack mode.
6714 */
6715DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6716{
6717 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6718 pTmpRsp->u += cbToAdd;
6719 else if (pCtx->ss.Attr.n.u1DefBig)
6720 pTmpRsp->DWords.dw0 += cbToAdd;
6721 else
6722 pTmpRsp->Words.w0 += cbToAdd;
6723}
6724
6725
6726/**
6727 * Subtracts from the temporary stack pointer.
6728 *
6729 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6730 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6731 * @param cbToSub The number of bytes to subtract.
6732 * @param pCtx Where to get the current stack mode.
6733 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6734 * expecting that.
6735 */
6736DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6737{
6738 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6739 pTmpRsp->u -= cbToSub;
6740 else if (pCtx->ss.Attr.n.u1DefBig)
6741 pTmpRsp->DWords.dw0 -= cbToSub;
6742 else
6743 pTmpRsp->Words.w0 -= cbToSub;
6744}
6745
6746
6747/**
6748 * Calculates the effective stack address for a push of the specified size as
6749 * well as the new RSP value (upper bits may be masked).
6750 *
6751 * @returns Effective stack address for the push.
6752 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6753 * @param pCtx Where to get the current stack mode.
6754 * @param cbItem The size of the stack item to push.
6755 * @param puNewRsp Where to return the new RSP value.
6756 */
6757DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6758{
6759 RTUINT64U uTmpRsp;
6760 RTGCPTR GCPtrTop;
6761 uTmpRsp.u = pCtx->rsp;
6762
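    /* Assigning through the 16-bit/32-bit union member below updates only the low
       word/dword of the temporary RSP; the truncated value doubles as the push address. */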
6763 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6764 GCPtrTop = uTmpRsp.u -= cbItem;
6765 else if (pCtx->ss.Attr.n.u1DefBig)
6766 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6767 else
6768 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6769 *puNewRsp = uTmpRsp.u;
6770 return GCPtrTop;
6771}
6772
6773
6774/**
6775 * Gets the current stack pointer and calculates the value after a pop of the
6776 * specified size.
6777 *
6778 * @returns Current stack pointer.
6779 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6780 * @param pCtx Where to get the current stack mode.
6781 * @param cbItem The size of the stack item to pop.
6782 * @param puNewRsp Where to return the new RSP value.
6783 */
6784DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6785{
6786 RTUINT64U uTmpRsp;
6787 RTGCPTR GCPtrTop;
6788 uTmpRsp.u = pCtx->rsp;
6789
6790 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6791 {
6792 GCPtrTop = uTmpRsp.u;
6793 uTmpRsp.u += cbItem;
6794 }
6795 else if (pCtx->ss.Attr.n.u1DefBig)
6796 {
6797 GCPtrTop = uTmpRsp.DWords.dw0;
6798 uTmpRsp.DWords.dw0 += cbItem;
6799 }
6800 else
6801 {
6802 GCPtrTop = uTmpRsp.Words.w0;
6803 uTmpRsp.Words.w0 += cbItem;
6804 }
6805 *puNewRsp = uTmpRsp.u;
6806 return GCPtrTop;
6807}
6808
6809
6810/**
6811 * Calculates the effective stack address for a push of the specified size as
6812 * well as the new temporary RSP value (upper bits may be masked).
6813 *
6814 * @returns Effective stack address for the push.
6815 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6816 * @param pCtx Where to get the current stack mode.
6817 * @param pTmpRsp The temporary stack pointer. This is updated.
6818 * @param cbItem The size of the stack item to push.
6819 */
6820DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6821{
6822 RTGCPTR GCPtrTop;
6823
6824 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6825 GCPtrTop = pTmpRsp->u -= cbItem;
6826 else if (pCtx->ss.Attr.n.u1DefBig)
6827 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6828 else
6829 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6830 return GCPtrTop;
6831}
6832
6833
6834/**
6835 * Gets the effective stack address for a pop of the specified size and
6836 * calculates and updates the temporary RSP.
6837 *
6838 * @returns Current stack pointer.
6839 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6840 * @param pCtx Where to get the current stack mode.
6841 * @param pTmpRsp The temporary stack pointer. This is updated.
6842 * @param cbItem The size of the stack item to pop.
6843 */
6844DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6845{
6846 RTGCPTR GCPtrTop;
6847 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6848 {
6849 GCPtrTop = pTmpRsp->u;
6850 pTmpRsp->u += cbItem;
6851 }
6852 else if (pCtx->ss.Attr.n.u1DefBig)
6853 {
6854 GCPtrTop = pTmpRsp->DWords.dw0;
6855 pTmpRsp->DWords.dw0 += cbItem;
6856 }
6857 else
6858 {
6859 GCPtrTop = pTmpRsp->Words.w0;
6860 pTmpRsp->Words.w0 += cbItem;
6861 }
6862 return GCPtrTop;
6863}
6864
6865/** @} */
6866
6867
6868/** @name FPU access and helpers.
6869 *
6870 * @{
6871 */
6872
6873
6874/**
6875 * Hook for preparing to use the host FPU.
6876 *
6877 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6878 *
6879 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6880 */
6881DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6882{
6883#ifdef IN_RING3
6884 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6885#else
6886 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6887#endif
6888}
6889
6890
6891/**
6892 * Hook for preparing to use the host FPU for SSE.
6893 *
6894 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6895 *
6896 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6897 */
6898DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6899{
6900 iemFpuPrepareUsage(pVCpu);
6901}
6902
6903
6904/**
6905 * Hook for preparing to use the host FPU for AVX.
6906 *
6907 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6908 *
6909 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6910 */
6911DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6912{
6913 iemFpuPrepareUsage(pVCpu);
6914}
6915
6916
6917/**
6918 * Hook for actualizing the guest FPU state before the interpreter reads it.
6919 *
6920 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6921 *
6922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6923 */
6924DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6925{
6926#ifdef IN_RING3
6927 NOREF(pVCpu);
6928#else
6929 CPUMRZFpuStateActualizeForRead(pVCpu);
6930#endif
6931}
6932
6933
6934/**
6935 * Hook for actualizing the guest FPU state before the interpreter changes it.
6936 *
6937 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6938 *
6939 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6940 */
6941DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6942{
6943#ifdef IN_RING3
6944 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6945#else
6946 CPUMRZFpuStateActualizeForChange(pVCpu);
6947#endif
6948}
6949
6950
6951/**
6952 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6953 * only.
6954 *
6955 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6956 *
6957 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6958 */
6959DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6960{
6961#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6962 NOREF(pVCpu);
6963#else
6964 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6965#endif
6966}
6967
6968
6969/**
6970 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6971 * read+write.
6972 *
6973 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6974 *
6975 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6976 */
6977DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6978{
6979#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6980 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6981#else
6982 CPUMRZFpuStateActualizeForChange(pVCpu);
6983#endif
6984}
6985
6986
6987/**
6988 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
6989 * only.
6990 *
6991 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6992 *
6993 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6994 */
6995DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
6996{
6997#ifdef IN_RING3
6998 NOREF(pVCpu);
6999#else
7000 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7001#endif
7002}
7003
7004
7005/**
7006 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7007 * read+write.
7008 *
7009 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7010 *
7011 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7012 */
7013DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
7014{
7015#ifdef IN_RING3
7016 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7017#else
7018 CPUMRZFpuStateActualizeForChange(pVCpu);
7019#endif
7020}
7021
7022
7023/**
7024 * Stores a QNaN value into a FPU register.
7025 *
7026 * @param pReg Pointer to the register.
7027 */
7028DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7029{
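    /* Writes the QNaN floating-point indefinite: sign set, all-ones exponent,
       mantissa 0xC000000000000000 (integer bit and quiet bit set). */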
7030 pReg->au32[0] = UINT32_C(0x00000000);
7031 pReg->au32[1] = UINT32_C(0xc0000000);
7032 pReg->au16[4] = UINT16_C(0xffff);
7033}
7034
7035
7036/**
7037 * Updates the FOP, FPU.CS and FPUIP registers.
7038 *
7039 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7040 * @param pCtx The CPU context.
7041 * @param pFpuCtx The FPU context.
7042 */
7043DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
7044{
7045 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7046 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7047    /** @todo x87.CS and FPUIP need to be kept separately. */
7048 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7049 {
7050 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7051 * happens in real mode here based on the fnsave and fnstenv images. */
7052 pFpuCtx->CS = 0;
7053 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
7054 }
7055 else
7056 {
7057 pFpuCtx->CS = pCtx->cs.Sel;
7058 pFpuCtx->FPUIP = pCtx->rip;
7059 }
7060}
7061
7062
7063/**
7064 * Updates the x87.DS and FPUDP registers.
7065 *
7066 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7067 * @param pCtx The CPU context.
7068 * @param pFpuCtx The FPU context.
7069 * @param iEffSeg The effective segment register.
7070 * @param GCPtrEff The effective address relative to @a iEffSeg.
7071 */
7072DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7073{
7074 RTSEL sel;
7075 switch (iEffSeg)
7076 {
7077 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
7078 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
7079 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
7080 case X86_SREG_ES: sel = pCtx->es.Sel; break;
7081 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
7082 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
7083 default:
7084 AssertMsgFailed(("%d\n", iEffSeg));
7085 sel = pCtx->ds.Sel;
7086 }
7087    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7088 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7089 {
7090 pFpuCtx->DS = 0;
7091 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7092 }
7093 else
7094 {
7095 pFpuCtx->DS = sel;
7096 pFpuCtx->FPUDP = GCPtrEff;
7097 }
7098}
7099
7100
7101/**
7102 * Rotates the stack registers in the push direction.
7103 *
7104 * @param pFpuCtx The FPU context.
7105 * @remarks This is a complete waste of time, but fxsave stores the registers in
7106 * stack order.
7107 */
7108DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7109{
7110 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7111 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7112 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7113 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7114 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7115 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7116 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7117 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7118 pFpuCtx->aRegs[0].r80 = r80Tmp;
7119}
7120
7121
7122/**
7123 * Rotates the stack registers in the pop direction.
7124 *
7125 * @param pFpuCtx The FPU context.
7126 * @remarks This is a complete waste of time, but fxsave stores the registers in
7127 * stack order.
7128 */
7129DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7130{
7131 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7132 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7133 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7134 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7135 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7136 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7137 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7138 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7139 pFpuCtx->aRegs[7].r80 = r80Tmp;
7140}
7141
7142
7143/**
7144 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7145 * exception prevents it.
7146 *
7147 * @param pResult The FPU operation result to push.
7148 * @param pFpuCtx The FPU context.
7149 */
7150IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7151{
7152 /* Update FSW and bail if there are pending exceptions afterwards. */
7153 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7154 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7155 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7156 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7157 {
7158 pFpuCtx->FSW = fFsw;
7159 return;
7160 }
7161
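    /* Adding 7 to the 3-bit TOP field is a decrement modulo 8, i.e. the slot the push will occupy. */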
7162 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7163 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7164 {
7165 /* All is fine, push the actual value. */
7166 pFpuCtx->FTW |= RT_BIT(iNewTop);
7167 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7168 }
7169 else if (pFpuCtx->FCW & X86_FCW_IM)
7170 {
7171 /* Masked stack overflow, push QNaN. */
7172 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7173 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7174 }
7175 else
7176 {
7177 /* Raise stack overflow, don't push anything. */
7178 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7179 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7180 return;
7181 }
7182
7183 fFsw &= ~X86_FSW_TOP_MASK;
7184 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7185 pFpuCtx->FSW = fFsw;
7186
7187 iemFpuRotateStackPush(pFpuCtx);
7188}
7189
7190
7191/**
7192 * Stores a result in a FPU register and updates the FSW and FTW.
7193 *
7194 * @param pFpuCtx The FPU context.
7195 * @param pResult The result to store.
7196 * @param iStReg Which FPU register to store it in.
7197 */
7198IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7199{
7200 Assert(iStReg < 8);
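    /* ST(iStReg) lives in physical register (TOP + iStReg) modulo 8. */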
7201 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7202 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7203 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7204 pFpuCtx->FTW |= RT_BIT(iReg);
7205 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7206}
7207
7208
7209/**
7210 * Only updates the FPU status word (FSW) with the result of the current
7211 * Updates only the FPU status word (FSW) with the result of the current
7212 *
7213 * @param pFpuCtx The FPU context.
7214 * @param u16FSW The FSW output of the current instruction.
7215 */
7216IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7217{
7218 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7219 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7220}
7221
7222
7223/**
7224 * Pops one item off the FPU stack if no pending exception prevents it.
7225 *
7226 * @param pFpuCtx The FPU context.
7227 */
7228IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7229{
7230 /* Check pending exceptions. */
7231 uint16_t uFSW = pFpuCtx->FSW;
7232 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7233 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7234 return;
7235
7236    /* TOP++ (i.e. pop one element off the stack). */
7237 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7238 uFSW &= ~X86_FSW_TOP_MASK;
7239 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7240 pFpuCtx->FSW = uFSW;
7241
7242 /* Mark the previous ST0 as empty. */
7243 iOldTop >>= X86_FSW_TOP_SHIFT;
7244 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7245
7246 /* Rotate the registers. */
7247 iemFpuRotateStackPop(pFpuCtx);
7248}
7249
7250
7251/**
7252 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7253 *
7254 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7255 * @param pResult The FPU operation result to push.
7256 */
7257IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7258{
7259 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7260 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7261 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7262 iemFpuMaybePushResult(pResult, pFpuCtx);
7263}
7264
7265
7266/**
7267 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7268 * and sets FPUDP and FPUDS.
7269 *
7270 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7271 * @param pResult The FPU operation result to push.
7272 * @param iEffSeg The effective segment register.
7273 * @param GCPtrEff The effective address relative to @a iEffSeg.
7274 */
7275IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7276{
7277 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7278 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7279 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7280 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7281 iemFpuMaybePushResult(pResult, pFpuCtx);
7282}
7283
7284
7285/**
7286 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7287 * unless a pending exception prevents it.
7288 *
7289 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7290 * @param pResult The FPU operation result to store and push.
7291 */
7292IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7293{
7294 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7295 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7296 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7297
7298 /* Update FSW and bail if there are pending exceptions afterwards. */
7299 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7300 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7301 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7302 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7303 {
7304 pFpuCtx->FSW = fFsw;
7305 return;
7306 }
7307
7308 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7309 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7310 {
7311 /* All is fine, push the actual value. */
7312 pFpuCtx->FTW |= RT_BIT(iNewTop);
7313 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7314 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7315 }
7316 else if (pFpuCtx->FCW & X86_FCW_IM)
7317 {
7318 /* Masked stack overflow, push QNaN. */
7319 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7320 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7321 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7322 }
7323 else
7324 {
7325 /* Raise stack overflow, don't push anything. */
7326 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7327 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7328 return;
7329 }
7330
7331 fFsw &= ~X86_FSW_TOP_MASK;
7332 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7333 pFpuCtx->FSW = fFsw;
7334
7335 iemFpuRotateStackPush(pFpuCtx);
7336}
7337
7338
7339/**
7340 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7341 * FOP.
7342 *
7343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7344 * @param pResult The result to store.
7345 * @param iStReg Which FPU register to store it in.
7346 */
7347IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7348{
7349 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7350 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7351 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7352 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7353}
7354
7355
7356/**
7357 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7358 * FOP, and then pops the stack.
7359 *
7360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7361 * @param pResult The result to store.
7362 * @param iStReg Which FPU register to store it in.
7363 */
7364IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7365{
7366 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7367 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7368 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7369 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7370 iemFpuMaybePopOne(pFpuCtx);
7371}
7372
7373
7374/**
7375 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7376 * FPUDP, and FPUDS.
7377 *
7378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7379 * @param pResult The result to store.
7380 * @param iStReg Which FPU register to store it in.
7381 * @param iEffSeg The effective memory operand selector register.
7382 * @param GCPtrEff The effective memory operand offset.
7383 */
7384IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7385 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7386{
7387 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7388 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7389 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7390 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7391 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7392}
7393
7394
7395/**
7396 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7397 * FPUDP, and FPUDS, and then pops the stack.
7398 *
7399 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7400 * @param pResult The result to store.
7401 * @param iStReg Which FPU register to store it in.
7402 * @param iEffSeg The effective memory operand selector register.
7403 * @param GCPtrEff The effective memory operand offset.
7404 */
7405IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7406 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7407{
7408 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7409 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7410 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7411 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7412 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7413 iemFpuMaybePopOne(pFpuCtx);
7414}
7415
7416
7417/**
7418 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7419 *
7420 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7421 */
7422IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7423{
7424 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7425 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7426 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7427}
7428
7429
7430/**
7431 * Marks the specified stack register as free (for FFREE).
7432 *
7433 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7434 * @param iStReg The register to free.
7435 */
7436IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7437{
7438 Assert(iStReg < 8);
7439 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7440 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7441 pFpuCtx->FTW &= ~RT_BIT(iReg);
7442}
7443
7444
7445/**
7446 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7447 *
7448 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7449 */
7450IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7451{
7452 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7453 uint16_t uFsw = pFpuCtx->FSW;
7454 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7455 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7456 uFsw &= ~X86_FSW_TOP_MASK;
7457 uFsw |= uTop;
7458 pFpuCtx->FSW = uFsw;
7459}
7460
7461
7462/**
7463 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7464 *
7465 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7466 */
7467IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7468{
7469 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7470 uint16_t uFsw = pFpuCtx->FSW;
7471 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
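    /* Adding 7 to the 3-bit TOP field decrements it modulo 8. */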
7472 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7473 uFsw &= ~X86_FSW_TOP_MASK;
7474 uFsw |= uTop;
7475 pFpuCtx->FSW = uFsw;
7476}
7477
7478
7479/**
7480 * Updates the FSW, FOP, FPUIP, and FPUCS.
7481 *
7482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7483 * @param u16FSW The FSW from the current instruction.
7484 */
7485IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7486{
7487 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7488 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7489 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7490 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7491}
7492
7493
7494/**
7495 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7496 *
7497 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7498 * @param u16FSW The FSW from the current instruction.
7499 */
7500IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7501{
7502 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7503 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7504 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7505 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7506 iemFpuMaybePopOne(pFpuCtx);
7507}
7508
7509
7510/**
7511 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7512 *
7513 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7514 * @param u16FSW The FSW from the current instruction.
7515 * @param iEffSeg The effective memory operand selector register.
7516 * @param GCPtrEff The effective memory operand offset.
7517 */
7518IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7519{
7520 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7521 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7522 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7523 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7524 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7525}
7526
7527
7528/**
7529 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7530 *
7531 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7532 * @param u16FSW The FSW from the current instruction.
7533 */
7534IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7535{
7536 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7537 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7538 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7539 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7540 iemFpuMaybePopOne(pFpuCtx);
7541 iemFpuMaybePopOne(pFpuCtx);
7542}
7543
7544
7545/**
7546 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7547 *
7548 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7549 * @param u16FSW The FSW from the current instruction.
7550 * @param iEffSeg The effective memory operand selector register.
7551 * @param GCPtrEff The effective memory operand offset.
7552 */
7553IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7554{
7555 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7556 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7557 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7558 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7559 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7560 iemFpuMaybePopOne(pFpuCtx);
7561}
7562
7563
7564/**
7565 * Worker routine for raising an FPU stack underflow exception.
7566 *
7567 * @param pFpuCtx The FPU context.
7568 * @param iStReg The stack register being accessed.
7569 */
7570IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7571{
7572 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7573 if (pFpuCtx->FCW & X86_FCW_IM)
7574 {
7575 /* Masked underflow. */
7576 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7577 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7578 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7579 if (iStReg != UINT8_MAX)
7580 {
7581 pFpuCtx->FTW |= RT_BIT(iReg);
7582 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7583 }
7584 }
7585 else
7586 {
7587 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7588 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7589 }
7590}
7591
7592
7593/**
7594 * Raises a FPU stack underflow exception.
7595 *
7596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7597 * @param iStReg The destination register that should be loaded
7598 * with QNaN if \#IS is not masked. Specify
7599 * UINT8_MAX if none (like for fcom).
7600 */
7601DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7602{
7603 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7604 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7605 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7606 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7607}
7608
7609
7610DECL_NO_INLINE(IEM_STATIC, void)
7611iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7612{
7613 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7614 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7615 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7616 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7617 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7618}
7619
7620
7621DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7622{
7623 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7624 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7625 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7626 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7627 iemFpuMaybePopOne(pFpuCtx);
7628}
7629
7630
7631DECL_NO_INLINE(IEM_STATIC, void)
7632iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7633{
7634 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7635 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7636 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7637 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7638 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7639 iemFpuMaybePopOne(pFpuCtx);
7640}
7641
7642
7643DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7644{
7645 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7646 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7647 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7648 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7649 iemFpuMaybePopOne(pFpuCtx);
7650 iemFpuMaybePopOne(pFpuCtx);
7651}
7652
7653
7654DECL_NO_INLINE(IEM_STATIC, void)
7655iemFpuStackPushUnderflow(PVMCPU pVCpu)
7656{
7657 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7658 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7659 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7660
7661 if (pFpuCtx->FCW & X86_FCW_IM)
7662 {
7663        /* Masked underflow - Push QNaN. */
7664 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7665 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7666 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7667 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7668 pFpuCtx->FTW |= RT_BIT(iNewTop);
7669 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7670 iemFpuRotateStackPush(pFpuCtx);
7671 }
7672 else
7673 {
7674 /* Exception pending - don't change TOP or the register stack. */
7675 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7676 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7677 }
7678}
7679
7680
7681DECL_NO_INLINE(IEM_STATIC, void)
7682iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7683{
7684 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7685 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7686 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7687
7688 if (pFpuCtx->FCW & X86_FCW_IM)
7689 {
7690        /* Masked underflow - Push QNaN. */
7691 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7692 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7693 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7694 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7695 pFpuCtx->FTW |= RT_BIT(iNewTop);
7696 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7697 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7698 iemFpuRotateStackPush(pFpuCtx);
7699 }
7700 else
7701 {
7702 /* Exception pending - don't change TOP or the register stack. */
7703 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7704 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7705 }
7706}
7707
7708
7709/**
7710 * Worker routine for raising an FPU stack overflow exception on a push.
7711 *
7712 * @param pFpuCtx The FPU context.
7713 */
7714IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7715{
7716 if (pFpuCtx->FCW & X86_FCW_IM)
7717 {
7718 /* Masked overflow. */
7719 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7720 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7721 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7722 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7723 pFpuCtx->FTW |= RT_BIT(iNewTop);
7724 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7725 iemFpuRotateStackPush(pFpuCtx);
7726 }
7727 else
7728 {
7729 /* Exception pending - don't change TOP or the register stack. */
7730 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7731 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7732 }
7733}
7734
7735
7736/**
7737 * Raises a FPU stack overflow exception on a push.
7738 *
7739 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7740 */
7741DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7742{
7743 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7744 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7745 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7746 iemFpuStackPushOverflowOnly(pFpuCtx);
7747}
7748
7749
7750/**
7751 * Raises a FPU stack overflow exception on a push with a memory operand.
7752 *
7753 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7754 * @param iEffSeg The effective memory operand selector register.
7755 * @param GCPtrEff The effective memory operand offset.
7756 */
7757DECL_NO_INLINE(IEM_STATIC, void)
7758iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7759{
7760 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7761 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7762 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7763 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7764 iemFpuStackPushOverflowOnly(pFpuCtx);
7765}
7766
7767
7768IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7769{
7770 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7771 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7772 if (pFpuCtx->FTW & RT_BIT(iReg))
7773 return VINF_SUCCESS;
7774 return VERR_NOT_FOUND;
7775}
7776
7777
7778IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7779{
7780 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7781 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7782 if (pFpuCtx->FTW & RT_BIT(iReg))
7783 {
7784 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7785 return VINF_SUCCESS;
7786 }
7787 return VERR_NOT_FOUND;
7788}
7789
7790
7791IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7792 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7793{
7794 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7795 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7796 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7797 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7798 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7799 {
7800 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7801 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7802 return VINF_SUCCESS;
7803 }
7804 return VERR_NOT_FOUND;
7805}
7806
7807
7808IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7809{
7810 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7811 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7812 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7813 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7814 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7815 {
7816 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7817 return VINF_SUCCESS;
7818 }
7819 return VERR_NOT_FOUND;
7820}
7821
7822
7823/**
7824 * Updates the FPU exception status after FCW is changed.
7825 *
7826 * @param pFpuCtx The FPU context.
7827 */
7828IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7829{
7830 uint16_t u16Fsw = pFpuCtx->FSW;
7831 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7832 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7833 else
7834 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7835 pFpuCtx->FSW = u16Fsw;
7836}
7837
7838
7839/**
7840 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7841 *
7842 * @returns The full FTW.
7843 * @param pFpuCtx The FPU context.
7844 */
7845IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7846{
7847 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7848 uint16_t u16Ftw = 0;
7849 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
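    /* Full tag values: 0 = valid, 1 = zero, 2 = special (NaN, infinity, denormal, unnormal), 3 = empty. */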
7850 for (unsigned iSt = 0; iSt < 8; iSt++)
7851 {
7852 unsigned const iReg = (iSt + iTop) & 7;
7853 if (!(u8Ftw & RT_BIT(iReg)))
7854 u16Ftw |= 3 << (iReg * 2); /* empty */
7855 else
7856 {
7857 uint16_t uTag;
7858 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7859 if (pr80Reg->s.uExponent == 0x7fff)
7860 uTag = 2; /* Exponent is all 1's => Special. */
7861 else if (pr80Reg->s.uExponent == 0x0000)
7862 {
7863 if (pr80Reg->s.u64Mantissa == 0x0000)
7864 uTag = 1; /* All bits are zero => Zero. */
7865 else
7866 uTag = 2; /* Must be special. */
7867 }
7868 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7869 uTag = 0; /* Valid. */
7870 else
7871 uTag = 2; /* Must be special. */
7872
7873            u16Ftw |= uTag << (iReg * 2);
7874 }
7875 }
7876
7877 return u16Ftw;
7878}
7879
7880
7881/**
7882 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7883 *
7884 * @returns The compressed FTW.
7885 * @param u16FullFtw The full FTW to convert.
7886 */
7887IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7888{
7889 uint8_t u8Ftw = 0;
7890 for (unsigned i = 0; i < 8; i++)
7891 {
7892 if ((u16FullFtw & 3) != 3 /*empty*/)
7893 u8Ftw |= RT_BIT(i);
7894 u16FullFtw >>= 2;
7895 }
7896
7897 return u8Ftw;
7898}
7899
7900/** @} */
7901
7902
7903/** @name Memory access.
7904 *
7905 * @{
7906 */
7907
7908
7909/**
7910 * Updates the IEMCPU::cbWritten counter if applicable.
7911 *
7912 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7913 * @param fAccess The access being accounted for.
7914 * @param cbMem The access size.
7915 */
7916DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7917{
7918 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7919 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7920 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7921}
7922
7923
7924/**
7925 * Checks if the given segment can be written to, raising the appropriate
7926 * exception if not.
7927 *
7928 * @returns VBox strict status code.
7929 *
7930 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7931 * @param pHid Pointer to the hidden register.
7932 * @param iSegReg The register number.
7933 * @param pu64BaseAddr Where to return the base address to use for the
7934 * segment. (In 64-bit code it may differ from the
7935 * base in the hidden segment.)
7936 */
7937IEM_STATIC VBOXSTRICTRC
7938iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7939{
7940 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7941 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7942 else
7943 {
7944 if (!pHid->Attr.n.u1Present)
7945 {
7946 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7947 AssertRelease(uSel == 0);
7948 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7949 return iemRaiseGeneralProtectionFault0(pVCpu);
7950 }
7951
7952 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7953 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7954 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7955 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7956 *pu64BaseAddr = pHid->u64Base;
7957 }
7958 return VINF_SUCCESS;
7959}
7960
7961
7962/**
7963 * Checks if the given segment can be read from, raising the appropriate
7964 * exception if not.
7965 *
7966 * @returns VBox strict status code.
7967 *
7968 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7969 * @param pHid Pointer to the hidden register.
7970 * @param iSegReg The register number.
7971 * @param pu64BaseAddr Where to return the base address to use for the
7972 * segment. (In 64-bit code it may differ from the
7973 * base in the hidden segment.)
7974 */
7975IEM_STATIC VBOXSTRICTRC
7976iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7977{
7978 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7979 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7980 else
7981 {
7982 if (!pHid->Attr.n.u1Present)
7983 {
7984 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7985 AssertRelease(uSel == 0);
7986 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7987 return iemRaiseGeneralProtectionFault0(pVCpu);
7988 }
7989
7990 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7991 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7992 *pu64BaseAddr = pHid->u64Base;
7993 }
7994 return VINF_SUCCESS;
7995}
7996
7997
7998/**
7999 * Applies the segment limit, base and attributes.
8000 *
8001 * This may raise a \#GP or \#SS.
8002 *
8003 * @returns VBox strict status code.
8004 *
8005 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8006 * @param fAccess The kind of access which is being performed.
8007 * @param iSegReg The index of the segment register to apply.
8008 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8009 * TSS, ++).
8010 * @param cbMem The access size.
8011 * @param pGCPtrMem Pointer to the guest memory address to apply
8012 * segmentation to. Input and output parameter.
8013 */
8014IEM_STATIC VBOXSTRICTRC
8015iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8016{
8017 if (iSegReg == UINT8_MAX)
8018 return VINF_SUCCESS;
8019
8020 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8021 switch (pVCpu->iem.s.enmCpuMode)
8022 {
8023 case IEMMODE_16BIT:
8024 case IEMMODE_32BIT:
8025 {
8026 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8027 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8028
8029 if ( pSel->Attr.n.u1Present
8030 && !pSel->Attr.n.u1Unusable)
8031 {
8032 Assert(pSel->Attr.n.u1DescType);
8033 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8034 {
8035 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8036 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8037 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8038
8039 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8040 {
8041 /** @todo CPL check. */
8042 }
8043
8044 /*
8045 * There are two kinds of data selectors, normal and expand down.
8046 */
8047 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8048 {
8049 if ( GCPtrFirst32 > pSel->u32Limit
8050 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8051 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8052 }
8053 else
8054 {
8055 /*
8056 * The upper boundary is defined by the B bit, not the G bit!
8057 */
8058 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8059 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8060 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8061 }
8062 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8063 }
8064 else
8065 {
8066
8067 /*
8068                 * Code selectors can usually be used to read through; writing is
8069                 * only permitted in real and V8086 mode.
8070 */
8071 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8072 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8073 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8074 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8075 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8076
8077 if ( GCPtrFirst32 > pSel->u32Limit
8078 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8079 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8080
8081 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8082 {
8083 /** @todo CPL check. */
8084 }
8085
8086 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8087 }
8088 }
8089 else
8090 return iemRaiseGeneralProtectionFault0(pVCpu);
8091 return VINF_SUCCESS;
8092 }
8093
8094 case IEMMODE_64BIT:
8095 {
8096 RTGCPTR GCPtrMem = *pGCPtrMem;
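            /* In 64-bit mode only the FS and GS bases are applied; the other
               segments are treated as flat with a zero base. */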
8097 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8098 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8099
8100 Assert(cbMem >= 1);
8101 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8102 return VINF_SUCCESS;
8103 return iemRaiseGeneralProtectionFault0(pVCpu);
8104 }
8105
8106 default:
8107 AssertFailedReturn(VERR_IEM_IPE_7);
8108 }
8109}
8110
8111
8112/**
8113 * Translates a virtual address to a physical address and checks if we
8114 * can access the page as specified.
8115 *
8116 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8117 * @param GCPtrMem The virtual address.
8118 * @param fAccess The intended access.
8119 * @param pGCPhysMem Where to return the physical address.
8120 */
8121IEM_STATIC VBOXSTRICTRC
8122iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8123{
8124 /** @todo Need a different PGM interface here. We're currently using
8125 *        generic / REM interfaces.  This won't cut it for R0 & RC. */
8126 RTGCPHYS GCPhys;
8127 uint64_t fFlags;
8128 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8129 if (RT_FAILURE(rc))
8130 {
8131 /** @todo Check unassigned memory in unpaged mode. */
8132 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8133 *pGCPhysMem = NIL_RTGCPHYS;
8134 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8135 }
8136
8137 /* If the page is writable and does not have the no-exec bit set, all
8138 access is allowed. Otherwise we'll have to check more carefully... */
8139 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8140 {
8141 /* Write to read only memory? */
8142 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8143 && !(fFlags & X86_PTE_RW)
8144 && ( (pVCpu->iem.s.uCpl == 3
8145 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8146 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
8147 {
8148 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8149 *pGCPhysMem = NIL_RTGCPHYS;
8150 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8151 }
8152
8153 /* Kernel memory accessed by userland? */
8154 if ( !(fFlags & X86_PTE_US)
8155 && pVCpu->iem.s.uCpl == 3
8156 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8157 {
8158 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8159 *pGCPhysMem = NIL_RTGCPHYS;
8160 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8161 }
8162
8163 /* Executing non-executable memory? */
8164 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8165 && (fFlags & X86_PTE_PAE_NX)
8166 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
8167 {
8168 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8169 *pGCPhysMem = NIL_RTGCPHYS;
8170 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8171 VERR_ACCESS_DENIED);
8172 }
8173 }
8174
8175 /*
8176 * Set the dirty / access flags.
8177     * ASSUMES this is set when the address is translated rather than on commit...
8178 */
8179 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8180 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8181 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8182 {
8183 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8184 AssertRC(rc2);
8185 }
8186
8187 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8188 *pGCPhysMem = GCPhys;
8189 return VINF_SUCCESS;
8190}
8191
8192
8193
8194/**
8195 * Maps a physical page.
8196 *
8197 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8199 * @param GCPhysMem The physical address.
8200 * @param fAccess The intended access.
8201 * @param ppvMem Where to return the mapping address.
8202 * @param pLock The PGM lock.
8203 */
8204IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8205{
8206#ifdef IEM_VERIFICATION_MODE_FULL
8207 /* Force the alternative path so we can ignore writes. */
8208 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
8209 {
8210 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8211 {
8212 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
8213 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8214 if (RT_FAILURE(rc2))
8215 pVCpu->iem.s.fProblematicMemory = true;
8216 }
8217 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8218 }
8219#endif
8220#ifdef IEM_LOG_MEMORY_WRITES
8221 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8222 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8223#endif
8224#ifdef IEM_VERIFICATION_MODE_MINIMAL
8225 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8226#endif
8227
8228 /** @todo This API may require some improving later. A private deal with PGM
8229     *        regarding locking and unlocking needs to be struck. A couple of TLBs
8230 * living in PGM, but with publicly accessible inlined access methods
8231 * could perhaps be an even better solution. */
8232 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8233 GCPhysMem,
8234 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8235 pVCpu->iem.s.fBypassHandlers,
8236 ppvMem,
8237 pLock);
8238 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8239 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8240
8241#ifdef IEM_VERIFICATION_MODE_FULL
8242 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8243 pVCpu->iem.s.fProblematicMemory = true;
8244#endif
8245 return rc;
8246}
8247
8248
8249/**
8250 * Unmap a page previously mapped by iemMemPageMap.
8251 *
8252 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8253 * @param GCPhysMem The physical address.
8254 * @param fAccess The intended access.
8255 * @param pvMem What iemMemPageMap returned.
8256 * @param pLock The PGM lock.
8257 */
8258DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8259{
8260 NOREF(pVCpu);
8261 NOREF(GCPhysMem);
8262 NOREF(fAccess);
8263 NOREF(pvMem);
8264 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8265}
8266
8267
8268/**
8269 * Looks up a memory mapping entry.
8270 *
8271 * @returns The mapping index (non-negative) or VERR_NOT_FOUND (negative).
8272 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8273 * @param pvMem The memory address.
8274 * @param   fAccess             The access to look up (only the TYPE and WHAT bits are used).
8275 */
8276DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8277{
8278 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8279 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8280 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8281 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8282 return 0;
8283 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8284 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8285 return 1;
8286 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8287 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8288 return 2;
8289 return VERR_NOT_FOUND;
8290}
8291
8292
8293/**
8294 * Finds a free memmap entry when using iNextMapping doesn't work.
8295 *
8296 * @returns Memory mapping index, 1024 on failure.
8297 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8298 */
8299IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8300{
8301 /*
8302 * The easy case.
8303 */
8304 if (pVCpu->iem.s.cActiveMappings == 0)
8305 {
8306 pVCpu->iem.s.iNextMapping = 1;
8307 return 0;
8308 }
8309
8310 /* There should be enough mappings for all instructions. */
8311 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8312
8313 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8314 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8315 return i;
8316
8317 AssertFailedReturn(1024);
8318}
8319
8320
8321/**
8322 * Commits a bounce buffer that needs writing back and unmaps it.
8323 *
8324 * @returns Strict VBox status code.
8325 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8326 * @param iMemMap The index of the buffer to commit.
8327 * @param   fPostponeFail       Whether we can postpone write failures to ring-3.
8328 * Always false in ring-3, obviously.
8329 */
8330IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8331{
8332 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8333 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8334#ifdef IN_RING3
8335 Assert(!fPostponeFail);
8336 RT_NOREF_PV(fPostponeFail);
8337#endif
8338
8339 /*
8340 * Do the writing.
8341 */
8342#ifndef IEM_VERIFICATION_MODE_MINIMAL
8343 PVM pVM = pVCpu->CTX_SUFF(pVM);
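    /* Skip the physical write for unassigned memory and when verifying against
       REM; in the latter case the write is only recorded further down. */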
8344 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
8345 && !IEM_VERIFICATION_ENABLED(pVCpu))
8346 {
8347 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8348 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8349 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8350 if (!pVCpu->iem.s.fBypassHandlers)
8351 {
8352 /*
8353 * Carefully and efficiently dealing with access handler return
8354         * Carefully and efficiently dealing with access handler return
8355         * codes makes this a little bloated.
8355 */
8356 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8357 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8358 pbBuf,
8359 cbFirst,
8360 PGMACCESSORIGIN_IEM);
8361 if (rcStrict == VINF_SUCCESS)
8362 {
8363 if (cbSecond)
8364 {
8365 rcStrict = PGMPhysWrite(pVM,
8366 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8367 pbBuf + cbFirst,
8368 cbSecond,
8369 PGMACCESSORIGIN_IEM);
8370 if (rcStrict == VINF_SUCCESS)
8371 { /* nothing */ }
8372 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8373 {
8374 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8375 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8376 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8377 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8378 }
8379# ifndef IN_RING3
8380 else if (fPostponeFail)
8381 {
8382 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8383 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8384 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8385 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8386 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8387 return iemSetPassUpStatus(pVCpu, rcStrict);
8388 }
8389# endif
8390 else
8391 {
8392 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8393 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8394 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8395 return rcStrict;
8396 }
8397 }
8398 }
8399 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8400 {
8401 if (!cbSecond)
8402 {
8403 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8404 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8405 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8406 }
8407 else
8408 {
8409 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8410 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8411 pbBuf + cbFirst,
8412 cbSecond,
8413 PGMACCESSORIGIN_IEM);
8414 if (rcStrict2 == VINF_SUCCESS)
8415 {
8416 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8417 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8418 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8419 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8420 }
8421 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8422 {
8423 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8424 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8425 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8426 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8427 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8428 }
8429# ifndef IN_RING3
8430 else if (fPostponeFail)
8431 {
8432 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8433 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8434 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8435 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8436 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8437 return iemSetPassUpStatus(pVCpu, rcStrict);
8438 }
8439# endif
8440 else
8441 {
8442 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8443 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8444 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8445 return rcStrict2;
8446 }
8447 }
8448 }
8449# ifndef IN_RING3
8450 else if (fPostponeFail)
8451 {
8452 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8453 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8454 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8455 if (!cbSecond)
8456 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8457 else
8458 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8459 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8460 return iemSetPassUpStatus(pVCpu, rcStrict);
8461 }
8462# endif
8463 else
8464 {
8465 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8466 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8467 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8468 return rcStrict;
8469 }
8470 }
8471 else
8472 {
8473 /*
8474 * No access handlers, much simpler.
8475 */
8476 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8477 if (RT_SUCCESS(rc))
8478 {
8479 if (cbSecond)
8480 {
8481 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8482 if (RT_SUCCESS(rc))
8483 { /* likely */ }
8484 else
8485 {
8486 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8487 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8488 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8489 return rc;
8490 }
8491 }
8492 }
8493 else
8494 {
8495 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8496 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8497 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8498 return rc;
8499 }
8500 }
8501 }
8502#endif
8503
8504#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8505 /*
8506 * Record the write(s).
8507 */
8508 if (!pVCpu->iem.s.fNoRem)
8509 {
8510 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8511 if (pEvtRec)
8512 {
8513 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8514 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
8515 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8516 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
8517 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
8518 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8519 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8520 }
8521 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8522 {
8523 pEvtRec = iemVerifyAllocRecord(pVCpu);
8524 if (pEvtRec)
8525 {
8526 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8527 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
8528 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8529 memcpy(pEvtRec->u.RamWrite.ab,
8530 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
8531 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
8532 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8533 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8534 }
8535 }
8536 }
8537#endif
8538#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
8539 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8540 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8541 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8542 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8543 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8544 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8545
8546 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8547 g_cbIemWrote = cbWrote;
8548 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8549#endif
8550
8551 /*
8552 * Free the mapping entry.
8553 */
8554 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8555 Assert(pVCpu->iem.s.cActiveMappings != 0);
8556 pVCpu->iem.s.cActiveMappings--;
8557 return VINF_SUCCESS;
8558}
8559
8560
8561/**
8562 * iemMemMap worker that deals with a request crossing pages.
8563 */
8564IEM_STATIC VBOXSTRICTRC
8565iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8566{
8567 /*
8568 * Do the address translations.
8569 */
8570 RTGCPHYS GCPhysFirst;
8571 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8572 if (rcStrict != VINF_SUCCESS)
8573 return rcStrict;
8574
8575 RTGCPHYS GCPhysSecond;
8576 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8577 fAccess, &GCPhysSecond);
8578 if (rcStrict != VINF_SUCCESS)
8579 return rcStrict;
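    /* The second page is always accessed from offset zero, so mask off any
       offset bits of its physical address. */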
8580 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8581
8582 PVM pVM = pVCpu->CTX_SUFF(pVM);
8583#ifdef IEM_VERIFICATION_MODE_FULL
8584 /*
8585 * Detect problematic memory when verifying so we can select
8586 * the right execution engine. (TLB: Redo this.)
8587 */
8588 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8589 {
8590 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8591 if (RT_SUCCESS(rc2))
8592 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8593 if (RT_FAILURE(rc2))
8594 pVCpu->iem.s.fProblematicMemory = true;
8595 }
8596#endif
8597
8598
8599 /*
8600 * Read in the current memory content if it's a read, execute or partial
8601 * write access.
8602 */
8603 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8604 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8605 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8606
8607 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8608 {
8609 if (!pVCpu->iem.s.fBypassHandlers)
8610 {
8611 /*
8612             * Must carefully deal with access handler status codes here,
8613             * which makes the code a bit bloated.
8614 */
8615 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8616 if (rcStrict == VINF_SUCCESS)
8617 {
8618 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8619 if (rcStrict == VINF_SUCCESS)
8620 { /*likely */ }
8621 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8622 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8623 else
8624 {
8625                     Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
8626 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8627 return rcStrict;
8628 }
8629 }
8630 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8631 {
8632 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8633 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8634 {
8635 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8636 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8637 }
8638 else
8639 {
8640 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8641                          GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8642 return rcStrict2;
8643 }
8644 }
8645 else
8646 {
8647 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8648 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8649 return rcStrict;
8650 }
8651 }
8652 else
8653 {
8654 /*
8655             * No informational status codes here, much more straightforward.
8656 */
8657 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8658 if (RT_SUCCESS(rc))
8659 {
8660 Assert(rc == VINF_SUCCESS);
8661 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8662 if (RT_SUCCESS(rc))
8663 Assert(rc == VINF_SUCCESS);
8664 else
8665 {
8666 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8667 return rc;
8668 }
8669 }
8670 else
8671 {
8672 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8673 return rc;
8674 }
8675 }
8676
8677#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8678 if ( !pVCpu->iem.s.fNoRem
8679 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8680 {
8681 /*
8682 * Record the reads.
8683 */
8684 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8685 if (pEvtRec)
8686 {
8687 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8688 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8689 pEvtRec->u.RamRead.cb = cbFirstPage;
8690 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8691 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8692 }
8693 pEvtRec = iemVerifyAllocRecord(pVCpu);
8694 if (pEvtRec)
8695 {
8696 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8697 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8698 pEvtRec->u.RamRead.cb = cbSecondPage;
8699 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8700 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8701 }
8702 }
8703#endif
8704 }
8705#ifdef VBOX_STRICT
8706 else
8707 memset(pbBuf, 0xcc, cbMem);
8708 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8709 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8710#endif
8711
8712 /*
8713 * Commit the bounce buffer entry.
8714 */
8715 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8716 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8717 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8718 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8719 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8720 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8721 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8722 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8723 pVCpu->iem.s.cActiveMappings++;
8724
8725 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8726 *ppvMem = pbBuf;
8727 return VINF_SUCCESS;
8728}
8729
8730
8731/**
8732 * iemMemMap worker that deals with iemMemPageMap failures.
8733 */
8734IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8735 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8736{
8737 /*
8738 * Filter out conditions we can handle and the ones which shouldn't happen.
8739 */
8740 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8741 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8742 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8743 {
8744 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8745 return rcMap;
8746 }
8747 pVCpu->iem.s.cPotentialExits++;
8748
8749 /*
8750 * Read in the current memory content if it's a read, execute or partial
8751 * write access.
8752 */
8753 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8754 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8755 {
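        /* Unassigned memory reads as all 0xff; otherwise fetch the current
           content so a partial write preserves the bytes we don't touch. */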
8756 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8757 memset(pbBuf, 0xff, cbMem);
8758 else
8759 {
8760 int rc;
8761 if (!pVCpu->iem.s.fBypassHandlers)
8762 {
8763 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8764 if (rcStrict == VINF_SUCCESS)
8765 { /* nothing */ }
8766 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8767 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8768 else
8769 {
8770 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8771 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8772 return rcStrict;
8773 }
8774 }
8775 else
8776 {
8777 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8778 if (RT_SUCCESS(rc))
8779 { /* likely */ }
8780 else
8781 {
8782 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8783 GCPhysFirst, rc));
8784 return rc;
8785 }
8786 }
8787 }
8788
8789#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8790 if ( !pVCpu->iem.s.fNoRem
8791 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8792 {
8793 /*
8794 * Record the read.
8795 */
8796 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8797 if (pEvtRec)
8798 {
8799 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8800 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8801 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8802 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8803 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8804 }
8805 }
8806#endif
8807 }
8808#ifdef VBOX_STRICT
8809 else
8810 memset(pbBuf, 0xcc, cbMem);
8811#endif
8812#ifdef VBOX_STRICT
8813 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8814 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8815#endif
8816
8817 /*
8818 * Commit the bounce buffer entry.
8819 */
8820 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8821 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8822 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8823 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8824 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8825 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8826 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8827 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8828 pVCpu->iem.s.cActiveMappings++;
8829
8830 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8831 *ppvMem = pbBuf;
8832 return VINF_SUCCESS;
8833}
8834
8835
8836
8837/**
8838 * Maps the specified guest memory for the given kind of access.
8839 *
8840 * This may be using bounce buffering of the memory if it's crossing a page
8841 * boundary or if there is an access handler installed for any of it. Because
8842 * of lock prefix guarantees, we're in for some extra clutter when this
8843 * happens.
8844 *
8845 * This may raise a \#GP, \#SS, \#PF or \#AC.
8846 *
8847 * @returns VBox strict status code.
8848 *
8849 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8850 * @param ppvMem Where to return the pointer to the mapped
8851 * memory.
8852 * @param cbMem The number of bytes to map. This is usually 1,
8853 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8854 * string operations it can be up to a page.
8855 * @param iSegReg The index of the segment register to use for
8856 * this access. The base and limits are checked.
8857 * Use UINT8_MAX to indicate that no segmentation
8858 * is required (for IDT, GDT and LDT accesses).
8859 * @param GCPtrMem The address of the guest memory.
8860 * @param fAccess How the memory is being accessed. The
8861 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8862 * how to map the memory, while the
8863 * IEM_ACCESS_WHAT_XXX bit is used when raising
8864 * exceptions.
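 *
 * Illustrative usage sketch (not taken verbatim from a caller; the variable
 * names pu32Dst, u32Value, iSegReg and GCPtrMem are placeholders) showing the
 * usual map / access / commit pattern for a dword write:
 * @code
 *      uint32_t *pu32Dst;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                                        iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu32Dst = u32Value;
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
 *      }
 *      return rcStrict;
 * @endcode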
8865 */
8866IEM_STATIC VBOXSTRICTRC
8867iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8868{
8869 /*
8870 * Check the input and figure out which mapping entry to use.
8871 */
8872 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8873 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8874 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8875
8876 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8877 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8878 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8879 {
8880 iMemMap = iemMemMapFindFree(pVCpu);
8881 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8882 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8883 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8884 pVCpu->iem.s.aMemMappings[2].fAccess),
8885 VERR_IEM_IPE_9);
8886 }
8887
8888 /*
8889 * Map the memory, checking that we can actually access it. If something
8890 * slightly complicated happens, fall back on bounce buffering.
8891 */
8892 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8893 if (rcStrict != VINF_SUCCESS)
8894 return rcStrict;
8895
8896 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8897 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8898
8899 RTGCPHYS GCPhysFirst;
8900 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8901 if (rcStrict != VINF_SUCCESS)
8902 return rcStrict;
8903
8904 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8905 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8906 if (fAccess & IEM_ACCESS_TYPE_READ)
8907 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8908
8909 void *pvMem;
8910 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8911 if (rcStrict != VINF_SUCCESS)
8912 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8913
8914 /*
8915 * Fill in the mapping table entry.
8916 */
8917 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8918 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8919 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8920 pVCpu->iem.s.cActiveMappings++;
8921
8922 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8923 *ppvMem = pvMem;
8924 return VINF_SUCCESS;
8925}
8926
8927
8928/**
8929 * Commits the guest memory if bounce buffered and unmaps it.
8930 *
8931 * @returns Strict VBox status code.
8932 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8933 * @param pvMem The mapping.
8934 * @param fAccess The kind of access.
8935 */
8936IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8937{
8938 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8939 AssertReturn(iMemMap >= 0, iMemMap);
8940
8941 /* If it's bounce buffered, we may need to write back the buffer. */
8942 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8943 {
8944 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8945 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8946 }
8947 /* Otherwise unlock it. */
8948 else
8949 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8950
8951 /* Free the entry. */
8952 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8953 Assert(pVCpu->iem.s.cActiveMappings != 0);
8954 pVCpu->iem.s.cActiveMappings--;
8955 return VINF_SUCCESS;
8956}
8957
8958#ifdef IEM_WITH_SETJMP
8959
8960/**
8961 * Maps the specified guest memory for the given kind of access, longjmp on
8962 * error.
8963 *
8964 * This may be using bounce buffering of the memory if it's crossing a page
8965 * boundary or if there is an access handler installed for any of it. Because
8966 * of lock prefix guarantees, we're in for some extra clutter when this
8967 * happens.
8968 *
8969 * This may raise a \#GP, \#SS, \#PF or \#AC.
8970 *
8971 * @returns Pointer to the mapped memory.
8972 *
8973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8974 * @param cbMem The number of bytes to map. This is usually 1,
8975 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8976 * string operations it can be up to a page.
8977 * @param iSegReg The index of the segment register to use for
8978 * this access. The base and limits are checked.
8979 * Use UINT8_MAX to indicate that no segmentation
8980 * is required (for IDT, GDT and LDT accesses).
8981 * @param GCPtrMem The address of the guest memory.
8982 * @param fAccess How the memory is being accessed. The
8983 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8984 * how to map the memory, while the
8985 * IEM_ACCESS_WHAT_XXX bit is used when raising
8986 * exceptions.
8987 */
8988IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8989{
8990 /*
8991 * Check the input and figure out which mapping entry to use.
8992 */
8993 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8994 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8995 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8996
8997 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8998 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8999 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
9000 {
9001 iMemMap = iemMemMapFindFree(pVCpu);
9002 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
9003 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
9004 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
9005 pVCpu->iem.s.aMemMappings[2].fAccess),
9006 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
9007 }
9008
9009 /*
9010 * Map the memory, checking that we can actually access it. If something
9011 * slightly complicated happens, fall back on bounce buffering.
9012 */
9013 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
9014 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9015 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9016
9017 /* Crossing a page boundary? */
9018 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
9019 { /* No (likely). */ }
9020 else
9021 {
9022 void *pvMem;
9023 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
9024 if (rcStrict == VINF_SUCCESS)
9025 return pvMem;
9026 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9027 }
9028
9029 RTGCPHYS GCPhysFirst;
9030 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
9031 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9032 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9033
9034 if (fAccess & IEM_ACCESS_TYPE_WRITE)
9035 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9036 if (fAccess & IEM_ACCESS_TYPE_READ)
9037 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9038
9039 void *pvMem;
9040 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9041 if (rcStrict == VINF_SUCCESS)
9042 { /* likely */ }
9043 else
9044 {
9045 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9046 if (rcStrict == VINF_SUCCESS)
9047 return pvMem;
9048 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9049 }
9050
9051 /*
9052 * Fill in the mapping table entry.
9053 */
9054 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9055 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9056 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9057 pVCpu->iem.s.cActiveMappings++;
9058
9059 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9060 return pvMem;
9061}
9062
9063
9064/**
9065 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9066 *
9067 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9068 * @param pvMem The mapping.
9069 * @param fAccess The kind of access.
9070 */
9071IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9072{
9073 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9074 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9075
9076 /* If it's bounce buffered, we may need to write back the buffer. */
9077 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9078 {
9079 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9080 {
9081 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9082 if (rcStrict == VINF_SUCCESS)
9083 return;
9084 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9085 }
9086 }
9087 /* Otherwise unlock it. */
9088 else
9089 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9090
9091 /* Free the entry. */
9092 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9093 Assert(pVCpu->iem.s.cActiveMappings != 0);
9094 pVCpu->iem.s.cActiveMappings--;
9095}
9096
9097#endif
9098
9099#ifndef IN_RING3
9100/**
9101 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9102 * buffer part shows trouble, the write is postponed to ring-3 (sets VMCPU_FF_IEM and the pending-write flags).
9103 *
9104 * Allows the instruction to be completed and retired, while the IEM user will
9105 * return to ring-3 immediately afterwards and do the postponed writes there.
9106 *
9107 * @returns VBox status code (no strict statuses). Caller must check
9108 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9109 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9110 * @param pvMem The mapping.
9111 * @param fAccess The kind of access.
9112 */
9113IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9114{
9115 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9116 AssertReturn(iMemMap >= 0, iMemMap);
9117
9118 /* If it's bounce buffered, we may need to write back the buffer. */
9119 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9120 {
9121 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9122 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9123 }
9124 /* Otherwise unlock it. */
9125 else
9126 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9127
9128 /* Free the entry. */
9129 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9130 Assert(pVCpu->iem.s.cActiveMappings != 0);
9131 pVCpu->iem.s.cActiveMappings--;
9132 return VINF_SUCCESS;
9133}
9134#endif
9135
9136
9137/**
9138 * Rolls back mappings, releasing page locks and such.
9139 *
9140 * The caller shall only call this after checking cActiveMappings.
9141 *
9143 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9144 */
9145IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9146{
9147 Assert(pVCpu->iem.s.cActiveMappings > 0);
9148
9149 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
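    /* Walk the mapping table backwards, invalidating every active entry and
       releasing the page lock of those that aren't bounce buffered. */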
9150 while (iMemMap-- > 0)
9151 {
9152 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9153 if (fAccess != IEM_ACCESS_INVALID)
9154 {
9155 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9156 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9157 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9158 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9159 Assert(pVCpu->iem.s.cActiveMappings > 0);
9160 pVCpu->iem.s.cActiveMappings--;
9161 }
9162 }
9163}
9164
9165
9166/**
9167 * Fetches a data byte.
9168 *
9169 * @returns Strict VBox status code.
9170 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9171 * @param pu8Dst Where to return the byte.
9172 * @param iSegReg The index of the segment register to use for
9173 * this access. The base and limits are checked.
9174 * @param GCPtrMem The address of the guest memory.
9175 */
9176IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9177{
9178 /* The lazy approach for now... */
9179 uint8_t const *pu8Src;
9180 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9181 if (rc == VINF_SUCCESS)
9182 {
9183 *pu8Dst = *pu8Src;
9184 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9185 }
9186 return rc;
9187}
9188
9189
9190#ifdef IEM_WITH_SETJMP
9191/**
9192 * Fetches a data byte, longjmp on error.
9193 *
9194 * @returns The byte.
9195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9196 * @param iSegReg The index of the segment register to use for
9197 * this access. The base and limits are checked.
9198 * @param GCPtrMem The address of the guest memory.
9199 */
9200DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9201{
9202 /* The lazy approach for now... */
9203 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9204 uint8_t const bRet = *pu8Src;
9205 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9206 return bRet;
9207}
9208#endif /* IEM_WITH_SETJMP */
9209
9210
9211/**
9212 * Fetches a data word.
9213 *
9214 * @returns Strict VBox status code.
9215 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9216 * @param pu16Dst Where to return the word.
9217 * @param iSegReg The index of the segment register to use for
9218 * this access. The base and limits are checked.
9219 * @param GCPtrMem The address of the guest memory.
9220 */
9221IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9222{
9223 /* The lazy approach for now... */
9224 uint16_t const *pu16Src;
9225 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9226 if (rc == VINF_SUCCESS)
9227 {
9228 *pu16Dst = *pu16Src;
9229 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9230 }
9231 return rc;
9232}
9233
9234
9235#ifdef IEM_WITH_SETJMP
9236/**
9237 * Fetches a data word, longjmp on error.
9238 *
9239 * @returns The word
9240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9241 * @param iSegReg The index of the segment register to use for
9242 * this access. The base and limits are checked.
9243 * @param GCPtrMem The address of the guest memory.
9244 */
9245DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9246{
9247 /* The lazy approach for now... */
9248 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9249 uint16_t const u16Ret = *pu16Src;
9250 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9251 return u16Ret;
9252}
9253#endif
9254
9255
9256/**
9257 * Fetches a data dword.
9258 *
9259 * @returns Strict VBox status code.
9260 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9261 * @param pu32Dst Where to return the dword.
9262 * @param iSegReg The index of the segment register to use for
9263 * this access. The base and limits are checked.
9264 * @param GCPtrMem The address of the guest memory.
9265 */
9266IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9267{
9268 /* The lazy approach for now... */
9269 uint32_t const *pu32Src;
9270 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9271 if (rc == VINF_SUCCESS)
9272 {
9273 *pu32Dst = *pu32Src;
9274 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9275 }
9276 return rc;
9277}
9278
9279
9280#ifdef IEM_WITH_SETJMP
9281
9282IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9283{
9284 Assert(cbMem >= 1);
9285 Assert(iSegReg < X86_SREG_COUNT);
9286
9287 /*
9288 * 64-bit mode is simpler.
9289 */
9290 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9291 {
9292 if (iSegReg >= X86_SREG_FS)
9293 {
9294 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9295 GCPtrMem += pSel->u64Base;
9296 }
9297
9298 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9299 return GCPtrMem;
9300 }
9301 /*
9302 * 16-bit and 32-bit segmentation.
9303 */
9304 else
9305 {
9306 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
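        /* Present, usable, expand-up data segments and readable code segments
           take the normal expand-up limit check; expand-down data segments are
           handled below, anything else is an invalid access. */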
9307 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9308 == X86DESCATTR_P /* data, expand up */
9309 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9310 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9311 {
9312 /* expand up */
9313 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9314 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9315 && GCPtrLast32 > (uint32_t)GCPtrMem))
9316 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9317 }
9318 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9319 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9320 {
9321 /* expand down */
9322 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9323 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9324 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9325 && GCPtrLast32 > (uint32_t)GCPtrMem))
9326 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9327 }
9328 else
9329 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9330 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9331 }
9332 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9333}
9334
9335
9336IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9337{
9338 Assert(cbMem >= 1);
9339 Assert(iSegReg < X86_SREG_COUNT);
9340
9341 /*
9342 * 64-bit mode is simpler.
9343 */
9344 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9345 {
9346 if (iSegReg >= X86_SREG_FS)
9347 {
9348 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9349 GCPtrMem += pSel->u64Base;
9350 }
9351
9352 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9353 return GCPtrMem;
9354 }
9355 /*
9356 * 16-bit and 32-bit segmentation.
9357 */
9358 else
9359 {
9360 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9361 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9362 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9363 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9364 {
9365 /* expand up */
9366 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9367 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9368 && GCPtrLast32 > (uint32_t)GCPtrMem))
9369 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9370 }
9371        else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9372 {
9373 /* expand down */
9374 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9375 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9376 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9377 && GCPtrLast32 > (uint32_t)GCPtrMem))
9378 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9379 }
9380 else
9381 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9382 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9383 }
9384 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9385}
9386
9387
9388/**
9389 * Fetches a data dword, longjmp on error, fallback/safe version.
9390 *
9391 * @returns The dword
9392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9393 * @param iSegReg The index of the segment register to use for
9394 * this access. The base and limits are checked.
9395 * @param GCPtrMem The address of the guest memory.
9396 */
9397IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9398{
9399 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9400 uint32_t const u32Ret = *pu32Src;
9401 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9402 return u32Ret;
9403}
9404
9405
9406/**
9407 * Fetches a data dword, longjmp on error.
9408 *
9409 * @returns The dword
9410 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9411 * @param iSegReg The index of the segment register to use for
9412 * this access. The base and limits are checked.
9413 * @param GCPtrMem The address of the guest memory.
9414 */
9415DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9416{
9417# ifdef IEM_WITH_DATA_TLB
9418 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9419 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9420 {
9421 /// @todo more later.
9422 }
9423
9424 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9425# else
9426 /* The lazy approach. */
9427 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9428 uint32_t const u32Ret = *pu32Src;
9429 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9430 return u32Ret;
9431# endif
9432}
9433#endif
9434
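/*
 * Illustrative sketch only (never compiled): the TLB fast path above is gated
 * on the access not crossing a page boundary.  The general form of that check
 * for an access of cb bytes looks like this; the helper name is made up for
 * the example.
 */
#if 0
static bool iemExampleAccessStaysOnOnePage(RTGCPTR GCPtrEff, size_t cb)
{
    /* The whole access is on one page when the page offset of the first
       byte leaves at least cb bytes before the end of the page. */
    return (GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - cb;
}
#endif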
9435
9436#ifdef SOME_UNUSED_FUNCTION
9437/**
9438 * Fetches a data dword and sign extends it to a qword.
9439 *
9440 * @returns Strict VBox status code.
9441 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9442 * @param pu64Dst Where to return the sign extended value.
9443 * @param iSegReg The index of the segment register to use for
9444 * this access. The base and limits are checked.
9445 * @param GCPtrMem The address of the guest memory.
9446 */
9447IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9448{
9449 /* The lazy approach for now... */
9450 int32_t const *pi32Src;
9451 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9452 if (rc == VINF_SUCCESS)
9453 {
9454 *pu64Dst = *pi32Src;
9455 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9456 }
9457#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9458 else
9459 *pu64Dst = 0;
9460#endif
9461 return rc;
9462}
9463#endif
9464
9465
9466/**
9467 * Fetches a data qword.
9468 *
9469 * @returns Strict VBox status code.
9470 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9471 * @param pu64Dst Where to return the qword.
9472 * @param iSegReg The index of the segment register to use for
9473 * this access. The base and limits are checked.
9474 * @param GCPtrMem The address of the guest memory.
9475 */
9476IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9477{
9478 /* The lazy approach for now... */
9479 uint64_t const *pu64Src;
9480 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9481 if (rc == VINF_SUCCESS)
9482 {
9483 *pu64Dst = *pu64Src;
9484 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9485 }
9486 return rc;
9487}
9488
9489
9490#ifdef IEM_WITH_SETJMP
9491/**
9492 * Fetches a data qword, longjmp on error.
9493 *
9494 * @returns The qword.
9495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9496 * @param iSegReg The index of the segment register to use for
9497 * this access. The base and limits are checked.
9498 * @param GCPtrMem The address of the guest memory.
9499 */
9500DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9501{
9502 /* The lazy approach for now... */
9503 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9504 uint64_t const u64Ret = *pu64Src;
9505 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9506 return u64Ret;
9507}
9508#endif
9509
9510
9511/**
9512 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9513 *
9514 * @returns Strict VBox status code.
9515 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9516 * @param pu64Dst Where to return the qword.
9517 * @param iSegReg The index of the segment register to use for
9518 * this access. The base and limits are checked.
9519 * @param GCPtrMem The address of the guest memory.
9520 */
9521IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9522{
9523 /* The lazy approach for now... */
9524 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9525 if (RT_UNLIKELY(GCPtrMem & 15))
9526 return iemRaiseGeneralProtectionFault0(pVCpu);
9527
9528 uint64_t const *pu64Src;
9529 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9530 if (rc == VINF_SUCCESS)
9531 {
9532 *pu64Dst = *pu64Src;
9533 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9534 }
9535 return rc;
9536}
9537
9538
9539#ifdef IEM_WITH_SETJMP
9540/**
9541 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9542 *
9543 * @returns The qword.
9544 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9545 * @param iSegReg The index of the segment register to use for
9546 * this access. The base and limits are checked.
9547 * @param GCPtrMem The address of the guest memory.
9548 */
9549DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9550{
9551 /* The lazy approach for now... */
9552 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9553 if (RT_LIKELY(!(GCPtrMem & 15)))
9554 {
9555 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9556 uint64_t const u64Ret = *pu64Src;
9557 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9558 return u64Ret;
9559 }
9560
9561 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9562 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9563}
9564#endif
9565
9566
9567/**
9568 * Fetches a data tword.
9569 *
9570 * @returns Strict VBox status code.
9571 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9572 * @param pr80Dst Where to return the tword.
9573 * @param iSegReg The index of the segment register to use for
9574 * this access. The base and limits are checked.
9575 * @param GCPtrMem The address of the guest memory.
9576 */
9577IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9578{
9579 /* The lazy approach for now... */
9580 PCRTFLOAT80U pr80Src;
9581 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9582 if (rc == VINF_SUCCESS)
9583 {
9584 *pr80Dst = *pr80Src;
9585 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9586 }
9587 return rc;
9588}
9589
9590
9591#ifdef IEM_WITH_SETJMP
9592/**
9593 * Fetches a data tword, longjmp on error.
9594 *
9595 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9596 * @param pr80Dst Where to return the tword.
9597 * @param iSegReg The index of the segment register to use for
9598 * this access. The base and limits are checked.
9599 * @param GCPtrMem The address of the guest memory.
9600 */
9601DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9602{
9603 /* The lazy approach for now... */
9604 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9605 *pr80Dst = *pr80Src;
9606 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9607}
9608#endif
9609
9610
9611/**
9612 * Fetches a data dqword (double qword), generally SSE related.
9613 *
9614 * @returns Strict VBox status code.
9615 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9616 * @param pu128Dst Where to return the dqword.
9617 * @param iSegReg The index of the segment register to use for
9618 * this access. The base and limits are checked.
9619 * @param GCPtrMem The address of the guest memory.
9620 */
9621IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9622{
9623 /* The lazy approach for now... */
9624 PCRTUINT128U pu128Src;
9625 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9626 if (rc == VINF_SUCCESS)
9627 {
9628 pu128Dst->au64[0] = pu128Src->au64[0];
9629 pu128Dst->au64[1] = pu128Src->au64[1];
9630 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9631 }
9632 return rc;
9633}
9634
9635
9636#ifdef IEM_WITH_SETJMP
9637/**
9638 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9639 *
9640 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9641 * @param pu128Dst Where to return the dqword.
9642 * @param iSegReg The index of the segment register to use for
9643 * this access. The base and limits are checked.
9644 * @param GCPtrMem The address of the guest memory.
9645 */
9646IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9647{
9648 /* The lazy approach for now... */
9649 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9650 pu128Dst->au64[0] = pu128Src->au64[0];
9651 pu128Dst->au64[1] = pu128Src->au64[1];
9652 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9653}
9654#endif
9655
9656
9657/**
9658 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9659 * related.
9660 *
9661 * Raises \#GP(0) if not aligned.
9662 *
9663 * @returns Strict VBox status code.
9664 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9665 * @param pu128Dst Where to return the dqword.
9666 * @param iSegReg The index of the segment register to use for
9667 * this access. The base and limits are checked.
9668 * @param GCPtrMem The address of the guest memory.
9669 */
9670IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9671{
9672 /* The lazy approach for now... */
9673 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9674 if ( (GCPtrMem & 15)
9675 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9676 return iemRaiseGeneralProtectionFault0(pVCpu);
9677
9678 PCRTUINT128U pu128Src;
9679 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9680 if (rc == VINF_SUCCESS)
9681 {
9682 pu128Dst->au64[0] = pu128Src->au64[0];
9683 pu128Dst->au64[1] = pu128Src->au64[1];
9684 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9685 }
9686 return rc;
9687}
9688
9689
9690#ifdef IEM_WITH_SETJMP
9691/**
9692 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9693 * related, longjmp on error.
9694 *
9695 * Raises \#GP(0) if not aligned.
9696 *
9697 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9698 * @param pu128Dst Where to return the dqword.
9699 * @param iSegReg The index of the segment register to use for
9700 * this access. The base and limits are checked.
9701 * @param GCPtrMem The address of the guest memory.
9702 */
9703DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9704{
9705 /* The lazy approach for now... */
9706 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9707 if ( (GCPtrMem & 15) == 0
9708 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9709 {
9710 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9711 pu128Dst->au64[0] = pu128Src->au64[0];
9712 pu128Dst->au64[1] = pu128Src->au64[1];
9713 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9714 return;
9715 }
9716
9717 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9718 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9719}
9720#endif
9721
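/*
 * Illustrative sketch only (never compiled): the alignment gate used by the
 * aligned SSE fetch/store helpers in this file.  A 16 byte aligned address
 * always passes; an unaligned one is only tolerated when MXCSR.MM (AMD's
 * misaligned SSE mode bit) is set, otherwise #GP(0) is raised.  The helper
 * name is made up for the example.
 */
#if 0
static bool iemExampleSseAlignmentOk(RTGCPTR GCPtrMem, uint32_t fMxCsr)
{
    return (GCPtrMem & 15) == 0
        || (fMxCsr & X86_MXCSR_MM);
}
#endif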
9722
9723/**
9724 * Fetches a data oword (octo word), generally AVX related.
9725 *
9726 * @returns Strict VBox status code.
9727 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9728 * @param pu256Dst Where to return the oword.
9729 * @param iSegReg The index of the segment register to use for
9730 * this access. The base and limits are checked.
9731 * @param GCPtrMem The address of the guest memory.
9732 */
9733IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9734{
9735 /* The lazy approach for now... */
9736 PCRTUINT256U pu256Src;
9737 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9738 if (rc == VINF_SUCCESS)
9739 {
9740 pu256Dst->au64[0] = pu256Src->au64[0];
9741 pu256Dst->au64[1] = pu256Src->au64[1];
9742 pu256Dst->au64[2] = pu256Src->au64[2];
9743 pu256Dst->au64[3] = pu256Src->au64[3];
9744 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9745 }
9746 return rc;
9747}
9748
9749
9750#ifdef IEM_WITH_SETJMP
9751/**
9752 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9753 *
9754 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9755 * @param pu256Dst Where to return the oword.
9756 * @param iSegReg The index of the segment register to use for
9757 * this access. The base and limits are checked.
9758 * @param GCPtrMem The address of the guest memory.
9759 */
9760IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9761{
9762 /* The lazy approach for now... */
9763 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9764 pu256Dst->au64[0] = pu256Src->au64[0];
9765 pu256Dst->au64[1] = pu256Src->au64[1];
9766 pu256Dst->au64[2] = pu256Src->au64[2];
9767 pu256Dst->au64[3] = pu256Src->au64[3];
9768 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9769}
9770#endif
9771
9772
9773/**
9774 * Fetches a data oword (octo word) at an aligned address, generally AVX
9775 * related.
9776 *
9777 * Raises \#GP(0) if not aligned.
9778 *
9779 * @returns Strict VBox status code.
9780 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9781 * @param pu256Dst Where to return the oword.
9782 * @param iSegReg The index of the segment register to use for
9783 * this access. The base and limits are checked.
9784 * @param GCPtrMem The address of the guest memory.
9785 */
9786IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9787{
9788 /* The lazy approach for now... */
9789 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9790 if (GCPtrMem & 31)
9791 return iemRaiseGeneralProtectionFault0(pVCpu);
9792
9793 PCRTUINT256U pu256Src;
9794 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9795 if (rc == VINF_SUCCESS)
9796 {
9797 pu256Dst->au64[0] = pu256Src->au64[0];
9798 pu256Dst->au64[1] = pu256Src->au64[1];
9799 pu256Dst->au64[2] = pu256Src->au64[2];
9800 pu256Dst->au64[3] = pu256Src->au64[3];
9801 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9802 }
9803 return rc;
9804}
9805
9806
9807#ifdef IEM_WITH_SETJMP
9808/**
9809 * Fetches a data oword (octo word) at an aligned address, generally AVX
9810 * related, longjmp on error.
9811 *
9812 * Raises \#GP(0) if not aligned.
9813 *
9814 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9815 * @param pu256Dst Where to return the oword.
9816 * @param iSegReg The index of the segment register to use for
9817 * this access. The base and limits are checked.
9818 * @param GCPtrMem The address of the guest memory.
9819 */
9820DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9821{
9822 /* The lazy approach for now... */
9823 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9824 if ((GCPtrMem & 31) == 0)
9825 {
9826 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9827 pu256Dst->au64[0] = pu256Src->au64[0];
9828 pu256Dst->au64[1] = pu256Src->au64[1];
9829 pu256Dst->au64[2] = pu256Src->au64[2];
9830 pu256Dst->au64[3] = pu256Src->au64[3];
9831 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9832 return;
9833 }
9834
9835 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9836 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9837}
9838#endif
9839
9840
9841
9842/**
9843 * Fetches a descriptor register (lgdt, lidt).
9844 *
9845 * @returns Strict VBox status code.
9846 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9847 * @param pcbLimit Where to return the limit.
9848 * @param pGCPtrBase Where to return the base.
9849 * @param iSegReg The index of the segment register to use for
9850 * this access. The base and limits are checked.
9851 * @param GCPtrMem The address of the guest memory.
9852 * @param enmOpSize The effective operand size.
9853 */
9854IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9855 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9856{
9857 /*
9858 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9859 * little special:
9860 * - The two reads are done separately.
9861 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9862 * - We suspect the 386 to actually commit the limit before the base in
9863 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9864 * don't try to emulate this eccentric behavior, because it's not well
9865 * enough understood and rather hard to trigger.
9866 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9867 */
9868 VBOXSTRICTRC rcStrict;
9869 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9870 {
9871 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9872 if (rcStrict == VINF_SUCCESS)
9873 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9874 }
9875 else
9876 {
9877 uint32_t uTmp = 0; /* (Silences Visual C++'s maybe-used-uninitialized warning.) */
9878 if (enmOpSize == IEMMODE_32BIT)
9879 {
9880 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9881 {
9882 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9883 if (rcStrict == VINF_SUCCESS)
9884 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9885 }
9886 else
9887 {
9888 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9889 if (rcStrict == VINF_SUCCESS)
9890 {
9891 *pcbLimit = (uint16_t)uTmp;
9892 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9893 }
9894 }
9895 if (rcStrict == VINF_SUCCESS)
9896 *pGCPtrBase = uTmp;
9897 }
9898 else
9899 {
9900 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9901 if (rcStrict == VINF_SUCCESS)
9902 {
9903 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9904 if (rcStrict == VINF_SUCCESS)
9905 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9906 }
9907 }
9908 }
9909 return rcStrict;
9910}
9911
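/*
 * Worked example for the 16-bit operand size path above (illustrative only,
 * never compiled; the values are made up).  The limit comes from the first
 * (word) read and only 24 bits of the second (dword) read are used for the
 * base, matching the pre-386 descriptor table register format.
 */
#if 0
    uint16_t const cbLimitEx = 0x00ff;                          /* first read: limit word  */
    uint32_t const uBaseRaw  = UINT32_C(0x12345678);            /* second read: base dword */
    uint32_t const uBaseUsed = uBaseRaw & UINT32_C(0x00ffffff); /* used base: 0x00345678   */
#endif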
9912
9913
9914/**
9915 * Stores a data byte.
9916 *
9917 * @returns Strict VBox status code.
9918 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9919 * @param iSegReg The index of the segment register to use for
9920 * this access. The base and limits are checked.
9921 * @param GCPtrMem The address of the guest memory.
9922 * @param u8Value The value to store.
9923 */
9924IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9925{
9926 /* The lazy approach for now... */
9927 uint8_t *pu8Dst;
9928 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9929 if (rc == VINF_SUCCESS)
9930 {
9931 *pu8Dst = u8Value;
9932 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9933 }
9934 return rc;
9935}
9936
9937
9938#ifdef IEM_WITH_SETJMP
9939/**
9940 * Stores a data byte, longjmp on error.
9941 *
9942 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9943 * @param iSegReg The index of the segment register to use for
9944 * this access. The base and limits are checked.
9945 * @param GCPtrMem The address of the guest memory.
9946 * @param u8Value The value to store.
9947 */
9948IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9949{
9950 /* The lazy approach for now... */
9951 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9952 *pu8Dst = u8Value;
9953 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9954}
9955#endif
9956
9957
9958/**
9959 * Stores a data word.
9960 *
9961 * @returns Strict VBox status code.
9962 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9963 * @param iSegReg The index of the segment register to use for
9964 * this access. The base and limits are checked.
9965 * @param GCPtrMem The address of the guest memory.
9966 * @param u16Value The value to store.
9967 */
9968IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9969{
9970 /* The lazy approach for now... */
9971 uint16_t *pu16Dst;
9972 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9973 if (rc == VINF_SUCCESS)
9974 {
9975 *pu16Dst = u16Value;
9976 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9977 }
9978 return rc;
9979}
9980
9981
9982#ifdef IEM_WITH_SETJMP
9983/**
9984 * Stores a data word, longjmp on error.
9985 *
9986 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9987 * @param iSegReg The index of the segment register to use for
9988 * this access. The base and limits are checked.
9989 * @param GCPtrMem The address of the guest memory.
9990 * @param u16Value The value to store.
9991 */
9992IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9993{
9994 /* The lazy approach for now... */
9995 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9996 *pu16Dst = u16Value;
9997 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9998}
9999#endif
10000
10001
10002/**
10003 * Stores a data dword.
10004 *
10005 * @returns Strict VBox status code.
10006 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10007 * @param iSegReg The index of the segment register to use for
10008 * this access. The base and limits are checked.
10009 * @param GCPtrMem The address of the guest memory.
10010 * @param u32Value The value to store.
10011 */
10012IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10013{
10014 /* The lazy approach for now... */
10015 uint32_t *pu32Dst;
10016 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10017 if (rc == VINF_SUCCESS)
10018 {
10019 *pu32Dst = u32Value;
10020 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10021 }
10022 return rc;
10023}
10024
10025
10026#ifdef IEM_WITH_SETJMP
10027/**
10028 * Stores a data dword, longjmp on error.
10029 *
10031 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10032 * @param iSegReg The index of the segment register to use for
10033 * this access. The base and limits are checked.
10034 * @param GCPtrMem The address of the guest memory.
10035 * @param u32Value The value to store.
10036 */
10037IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10038{
10039 /* The lazy approach for now... */
10040 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10041 *pu32Dst = u32Value;
10042 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10043}
10044#endif
10045
10046
10047/**
10048 * Stores a data qword.
10049 *
10050 * @returns Strict VBox status code.
10051 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10052 * @param iSegReg The index of the segment register to use for
10053 * this access. The base and limits are checked.
10054 * @param GCPtrMem The address of the guest memory.
10055 * @param u64Value The value to store.
10056 */
10057IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10058{
10059 /* The lazy approach for now... */
10060 uint64_t *pu64Dst;
10061 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10062 if (rc == VINF_SUCCESS)
10063 {
10064 *pu64Dst = u64Value;
10065 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10066 }
10067 return rc;
10068}
10069
10070
10071#ifdef IEM_WITH_SETJMP
10072/**
10073 * Stores a data qword, longjmp on error.
10074 *
10075 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10076 * @param iSegReg The index of the segment register to use for
10077 * this access. The base and limits are checked.
10078 * @param GCPtrMem The address of the guest memory.
10079 * @param u64Value The value to store.
10080 */
10081IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10082{
10083 /* The lazy approach for now... */
10084 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10085 *pu64Dst = u64Value;
10086 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10087}
10088#endif
10089
10090
10091/**
10092 * Stores a data dqword.
10093 *
10094 * @returns Strict VBox status code.
10095 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10096 * @param iSegReg The index of the segment register to use for
10097 * this access. The base and limits are checked.
10098 * @param GCPtrMem The address of the guest memory.
10099 * @param u128Value The value to store.
10100 */
10101IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10102{
10103 /* The lazy approach for now... */
10104 PRTUINT128U pu128Dst;
10105 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10106 if (rc == VINF_SUCCESS)
10107 {
10108 pu128Dst->au64[0] = u128Value.au64[0];
10109 pu128Dst->au64[1] = u128Value.au64[1];
10110 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10111 }
10112 return rc;
10113}
10114
10115
10116#ifdef IEM_WITH_SETJMP
10117/**
10118 * Stores a data dqword, longjmp on error.
10119 *
10120 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10121 * @param iSegReg The index of the segment register to use for
10122 * this access. The base and limits are checked.
10123 * @param GCPtrMem The address of the guest memory.
10124 * @param u128Value The value to store.
10125 */
10126IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10127{
10128 /* The lazy approach for now... */
10129 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10130 pu128Dst->au64[0] = u128Value.au64[0];
10131 pu128Dst->au64[1] = u128Value.au64[1];
10132 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10133}
10134#endif
10135
10136
10137/**
10138 * Stores a data dqword, SSE aligned.
10139 *
10140 * @returns Strict VBox status code.
10141 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10142 * @param iSegReg The index of the segment register to use for
10143 * this access. The base and limits are checked.
10144 * @param GCPtrMem The address of the guest memory.
10145 * @param u128Value The value to store.
10146 */
10147IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10148{
10149 /* The lazy approach for now... */
10150 if ( (GCPtrMem & 15)
10151 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10152 return iemRaiseGeneralProtectionFault0(pVCpu);
10153
10154 PRTUINT128U pu128Dst;
10155 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10156 if (rc == VINF_SUCCESS)
10157 {
10158 pu128Dst->au64[0] = u128Value.au64[0];
10159 pu128Dst->au64[1] = u128Value.au64[1];
10160 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10161 }
10162 return rc;
10163}
10164
10165
10166#ifdef IEM_WITH_SETJMP
10167/**
10168 * Stores a data dqword, SSE aligned, longjmp on error.
10169 *
10171 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10172 * @param iSegReg The index of the segment register to use for
10173 * this access. The base and limits are checked.
10174 * @param GCPtrMem The address of the guest memory.
10175 * @param u128Value The value to store.
10176 */
10177DECL_NO_INLINE(IEM_STATIC, void)
10178iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10179{
10180 /* The lazy approach for now... */
10181 if ( (GCPtrMem & 15) == 0
10182 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10183 {
10184 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10185 pu128Dst->au64[0] = u128Value.au64[0];
10186 pu128Dst->au64[1] = u128Value.au64[1];
10187 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10188 return;
10189 }
10190
10191 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10192 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10193}
10194#endif
10195
10196
10197/**
10198 * Stores a data oword (octo word).
10199 *
10200 * @returns Strict VBox status code.
10201 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10202 * @param iSegReg The index of the segment register to use for
10203 * this access. The base and limits are checked.
10204 * @param GCPtrMem The address of the guest memory.
10205 * @param pu256Value Pointer to the value to store.
10206 */
10207IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10208{
10209 /* The lazy approach for now... */
10210 PRTUINT256U pu256Dst;
10211 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10212 if (rc == VINF_SUCCESS)
10213 {
10214 pu256Dst->au64[0] = pu256Value->au64[0];
10215 pu256Dst->au64[1] = pu256Value->au64[1];
10216 pu256Dst->au64[2] = pu256Value->au64[2];
10217 pu256Dst->au64[3] = pu256Value->au64[3];
10218 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10219 }
10220 return rc;
10221}
10222
10223
10224#ifdef IEM_WITH_SETJMP
10225/**
10226 * Stores a data oword (octo word), longjmp on error.
10227 *
10228 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10229 * @param iSegReg The index of the segment register to use for
10230 * this access. The base and limits are checked.
10231 * @param GCPtrMem The address of the guest memory.
10232 * @param pu256Value Pointer to the value to store.
10233 */
10234IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10235{
10236 /* The lazy approach for now... */
10237 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10238 pu256Dst->au64[0] = pu256Value->au64[0];
10239 pu256Dst->au64[1] = pu256Value->au64[1];
10240 pu256Dst->au64[2] = pu256Value->au64[2];
10241 pu256Dst->au64[3] = pu256Value->au64[3];
10242 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10243}
10244#endif
10245
10246
10247/**
10248 * Stores a data oword (octo word), AVX aligned.
10249 *
10250 * @returns Strict VBox status code.
10251 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10252 * @param iSegReg The index of the segment register to use for
10253 * this access. The base and limits are checked.
10254 * @param GCPtrMem The address of the guest memory.
10255 * @param pu256Value Pointer to the value to store.
10256 */
10257IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10258{
10259 /* The lazy approach for now... */
10260 if (GCPtrMem & 31)
10261 return iemRaiseGeneralProtectionFault0(pVCpu);
10262
10263 PRTUINT256U pu256Dst;
10264 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10265 if (rc == VINF_SUCCESS)
10266 {
10267 pu256Dst->au64[0] = pu256Value->au64[0];
10268 pu256Dst->au64[1] = pu256Value->au64[1];
10269 pu256Dst->au64[2] = pu256Value->au64[2];
10270 pu256Dst->au64[3] = pu256Value->au64[3];
10271 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10272 }
10273 return rc;
10274}
10275
10276
10277#ifdef IEM_WITH_SETJMP
10278/**
10279 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10280 *
10282 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10283 * @param iSegReg The index of the segment register to use for
10284 * this access. The base and limits are checked.
10285 * @param GCPtrMem The address of the guest memory.
10286 * @param pu256Value Pointer to the value to store.
10287 */
10288DECL_NO_INLINE(IEM_STATIC, void)
10289iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10290{
10291 /* The lazy approach for now... */
10292 if ((GCPtrMem & 31) == 0)
10293 {
10294 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10295 pu256Dst->au64[0] = pu256Value->au64[0];
10296 pu256Dst->au64[1] = pu256Value->au64[1];
10297 pu256Dst->au64[2] = pu256Value->au64[2];
10298 pu256Dst->au64[3] = pu256Value->au64[3];
10299 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10300 return;
10301 }
10302
10303 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10304 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10305}
10306#endif
10307
10308
10309/**
10310 * Stores a descriptor register (sgdt, sidt).
10311 *
10312 * @returns Strict VBox status code.
10313 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10314 * @param cbLimit The limit.
10315 * @param GCPtrBase The base address.
10316 * @param iSegReg The index of the segment register to use for
10317 * this access. The base and limits are checked.
10318 * @param GCPtrMem The address of the guest memory.
10319 */
10320IEM_STATIC VBOXSTRICTRC
10321iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10322{
10323 VBOXSTRICTRC rcStrict;
10324 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_READS))
10325 {
10326 Log(("sidt/sgdt: Guest intercept -> #VMEXIT\n"));
10327 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
10328 }
10329
10330 /*
10331 * The SIDT and SGDT instructions actually store the data using two
10332 * independent writes. The instructions do not respond to operand size prefixes.
10333 */
10334 rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10335 if (rcStrict == VINF_SUCCESS)
10336 {
10337 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10338 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10339 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10340 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10341 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10342 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10343 else
10344 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10345 }
10346 return rcStrict;
10347}
10348
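/*
 * Worked example for the 16-bit mode store above (illustrative only, never
 * compiled; values are made up).  With a base of 0x00345678, a 286-or-older
 * target CPU stores the dword 0xff345678 after the limit word (high byte
 * forced to 0xFF), while 386+ targets store the plain 32-bit base 0x00345678.
 */
#if 0
    uint32_t const uBase286 = UINT32_C(0x00345678) | UINT32_C(0xff000000); /* -> 0xff345678 */
    uint32_t const uBase386 = UINT32_C(0x00345678);                        /* -> 0x00345678 */
#endif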
10349
10350/**
10351 * Pushes a word onto the stack.
10352 *
10353 * @returns Strict VBox status code.
10354 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10355 * @param u16Value The value to push.
10356 */
10357IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10358{
10359 /* Decrement the stack pointer. */
10360 uint64_t uNewRsp;
10361 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10362 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
10363
10364 /* Write the word the lazy way. */
10365 uint16_t *pu16Dst;
10366 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10367 if (rc == VINF_SUCCESS)
10368 {
10369 *pu16Dst = u16Value;
10370 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10371 }
10372
10373 /* Commit the new RSP value unless an access handler made trouble. */
10374 if (rc == VINF_SUCCESS)
10375 pCtx->rsp = uNewRsp;
10376
10377 return rc;
10378}
10379
10380
10381/**
10382 * Pushes a dword onto the stack.
10383 *
10384 * @returns Strict VBox status code.
10385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10386 * @param u32Value The value to push.
10387 */
10388IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10389{
10390 /* Decrement the stack pointer. */
10391 uint64_t uNewRsp;
10392 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10393 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10394
10395 /* Write the dword the lazy way. */
10396 uint32_t *pu32Dst;
10397 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10398 if (rc == VINF_SUCCESS)
10399 {
10400 *pu32Dst = u32Value;
10401 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10402 }
10403
10404 /* Commit the new RSP value unless an access handler made trouble. */
10405 if (rc == VINF_SUCCESS)
10406 pCtx->rsp = uNewRsp;
10407
10408 return rc;
10409}
10410
10411
10412/**
10413 * Pushes a dword segment register value onto the stack.
10414 *
10415 * @returns Strict VBox status code.
10416 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10417 * @param u32Value The value to push.
10418 */
10419IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10420{
10421 /* Decrement the stack pointer. */
10422 uint64_t uNewRsp;
10423 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10424 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10425
10426 VBOXSTRICTRC rc;
10427 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
10428 {
10429 /* The recompiler writes a full dword. */
10430 uint32_t *pu32Dst;
10431 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10432 if (rc == VINF_SUCCESS)
10433 {
10434 *pu32Dst = u32Value;
10435 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10436 }
10437 }
10438 else
10439 {
10440 /* The Intel docs talk about zero extending the selector register
10441 value. My actual Intel CPU here might be zero extending the value,
10442 but it still only writes the lower word... */
10443 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10444 * happens when crossing a page boundary, is the high word checked
10445 * for write accessibility or not? Probably it is. What about segment limits?
10446 * It appears this behavior is also shared with trap error codes.
10447 *
10448 * Docs indicate the behavior may have changed with the Pentium or Pentium Pro. Check
10449 * ancient hardware to see when it actually changed. */
10450 uint16_t *pu16Dst;
10451 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10452 if (rc == VINF_SUCCESS)
10453 {
10454 *pu16Dst = (uint16_t)u32Value;
10455 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10456 }
10457 }
10458
10459 /* Commit the new RSP value unless an access handler made trouble. */
10460 if (rc == VINF_SUCCESS)
10461 pCtx->rsp = uNewRsp;
10462
10463 return rc;
10464}
10465
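/*
 * Illustrative example only: the net effect modelled by the non-verifier path
 * above when pushing a segment register with a 32-bit operand size.  RSP
 * still drops by 4, but only the low word of the new stack slot is written;
 * the high word keeps whatever was already in memory.  Values are made up.
 *
 *   before: ESP = 0x1008, dword at 0x1004 = 0xDEADBEEF, DS = 0x0023
 *   push ds (32-bit operand size)
 *   after:  ESP = 0x1004, dword at 0x1004 = 0xDEAD0023
 */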
10466
10467/**
10468 * Pushes a qword onto the stack.
10469 *
10470 * @returns Strict VBox status code.
10471 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10472 * @param u64Value The value to push.
10473 */
10474IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10475{
10476 /* Decrement the stack pointer. */
10477 uint64_t uNewRsp;
10478 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10479 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
10480
10481 /* Write the qword the lazy way. */
10482 uint64_t *pu64Dst;
10483 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10484 if (rc == VINF_SUCCESS)
10485 {
10486 *pu64Dst = u64Value;
10487 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10488 }
10489
10490 /* Commit the new RSP value unless an access handler made trouble. */
10491 if (rc == VINF_SUCCESS)
10492 pCtx->rsp = uNewRsp;
10493
10494 return rc;
10495}
10496
10497
10498/**
10499 * Pops a word from the stack.
10500 *
10501 * @returns Strict VBox status code.
10502 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10503 * @param pu16Value Where to store the popped value.
10504 */
10505IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10506{
10507 /* Increment the stack pointer. */
10508 uint64_t uNewRsp;
10509 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10510 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
10511
10512 /* Read the word the lazy way. */
10513 uint16_t const *pu16Src;
10514 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10515 if (rc == VINF_SUCCESS)
10516 {
10517 *pu16Value = *pu16Src;
10518 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10519
10520 /* Commit the new RSP value. */
10521 if (rc == VINF_SUCCESS)
10522 pCtx->rsp = uNewRsp;
10523 }
10524
10525 return rc;
10526}
10527
10528
10529/**
10530 * Pops a dword from the stack.
10531 *
10532 * @returns Strict VBox status code.
10533 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10534 * @param pu32Value Where to store the popped value.
10535 */
10536IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10537{
10538 /* Increment the stack pointer. */
10539 uint64_t uNewRsp;
10540 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10541 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
10542
10543 /* Read the dword the lazy way. */
10544 uint32_t const *pu32Src;
10545 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10546 if (rc == VINF_SUCCESS)
10547 {
10548 *pu32Value = *pu32Src;
10549 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10550
10551 /* Commit the new RSP value. */
10552 if (rc == VINF_SUCCESS)
10553 pCtx->rsp = uNewRsp;
10554 }
10555
10556 return rc;
10557}
10558
10559
10560/**
10561 * Pops a qword from the stack.
10562 *
10563 * @returns Strict VBox status code.
10564 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10565 * @param pu64Value Where to store the popped value.
10566 */
10567IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10568{
10569 /* Increment the stack pointer. */
10570 uint64_t uNewRsp;
10571 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10572 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
10573
10574 /* Read the qword the lazy way. */
10575 uint64_t const *pu64Src;
10576 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10577 if (rc == VINF_SUCCESS)
10578 {
10579 *pu64Value = *pu64Src;
10580 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10581
10582 /* Commit the new RSP value. */
10583 if (rc == VINF_SUCCESS)
10584 pCtx->rsp = uNewRsp;
10585 }
10586
10587 return rc;
10588}
10589
10590
10591/**
10592 * Pushes a word onto the stack, using a temporary stack pointer.
10593 *
10594 * @returns Strict VBox status code.
10595 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10596 * @param u16Value The value to push.
10597 * @param pTmpRsp Pointer to the temporary stack pointer.
10598 */
10599IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10600{
10601 /* Decrement the stack pointer. */
10602 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10603 RTUINT64U NewRsp = *pTmpRsp;
10604 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
10605
10606 /* Write the word the lazy way. */
10607 uint16_t *pu16Dst;
10608 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10609 if (rc == VINF_SUCCESS)
10610 {
10611 *pu16Dst = u16Value;
10612 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10613 }
10614
10615 /* Commit the new RSP value unless an access handler made trouble. */
10616 if (rc == VINF_SUCCESS)
10617 *pTmpRsp = NewRsp;
10618
10619 return rc;
10620}
10621
10622
10623/**
10624 * Pushes a dword onto the stack, using a temporary stack pointer.
10625 *
10626 * @returns Strict VBox status code.
10627 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10628 * @param u32Value The value to push.
10629 * @param pTmpRsp Pointer to the temporary stack pointer.
10630 */
10631IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10632{
10633 /* Decrement the stack pointer. */
10634 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10635 RTUINT64U NewRsp = *pTmpRsp;
10636 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
10637
10638 /* Write the dword the lazy way. */
10639 uint32_t *pu32Dst;
10640 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10641 if (rc == VINF_SUCCESS)
10642 {
10643 *pu32Dst = u32Value;
10644 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10645 }
10646
10647 /* Commit the new RSP value unless an access handler made trouble. */
10648 if (rc == VINF_SUCCESS)
10649 *pTmpRsp = NewRsp;
10650
10651 return rc;
10652}
10653
10654
10655/**
10656 * Pushes a qword onto the stack, using a temporary stack pointer.
10657 *
10658 * @returns Strict VBox status code.
10659 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10660 * @param u64Value The value to push.
10661 * @param pTmpRsp Pointer to the temporary stack pointer.
10662 */
10663IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10664{
10665 /* Decrement the stack pointer. */
10666 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10667 RTUINT64U NewRsp = *pTmpRsp;
10668 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
10669
10670 /* Write the qword the lazy way. */
10671 uint64_t *pu64Dst;
10672 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10673 if (rc == VINF_SUCCESS)
10674 {
10675 *pu64Dst = u64Value;
10676 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10677 }
10678
10679 /* Commit the new RSP value unless an access handler made trouble. */
10680 if (rc == VINF_SUCCESS)
10681 *pTmpRsp = NewRsp;
10682
10683 return rc;
10684}
10685
10686
10687/**
10688 * Pops a word from the stack, using a temporary stack pointer.
10689 *
10690 * @returns Strict VBox status code.
10691 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10692 * @param pu16Value Where to store the popped value.
10693 * @param pTmpRsp Pointer to the temporary stack pointer.
10694 */
10695IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10696{
10697 /* Increment the stack pointer. */
10698 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10699 RTUINT64U NewRsp = *pTmpRsp;
10700 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
10701
10702 /* Read the word the lazy way. */
10703 uint16_t const *pu16Src;
10704 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10705 if (rc == VINF_SUCCESS)
10706 {
10707 *pu16Value = *pu16Src;
10708 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10709
10710 /* Commit the new RSP value. */
10711 if (rc == VINF_SUCCESS)
10712 *pTmpRsp = NewRsp;
10713 }
10714
10715 return rc;
10716}
10717
10718
10719/**
10720 * Pops a dword from the stack, using a temporary stack pointer.
10721 *
10722 * @returns Strict VBox status code.
10723 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10724 * @param pu32Value Where to store the popped value.
10725 * @param pTmpRsp Pointer to the temporary stack pointer.
10726 */
10727IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10728{
10729 /* Increment the stack pointer. */
10730 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10731 RTUINT64U NewRsp = *pTmpRsp;
10732 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
10733
10734 /* Read the dword the lazy way. */
10735 uint32_t const *pu32Src;
10736 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10737 if (rc == VINF_SUCCESS)
10738 {
10739 *pu32Value = *pu32Src;
10740 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10741
10742 /* Commit the new RSP value. */
10743 if (rc == VINF_SUCCESS)
10744 *pTmpRsp = NewRsp;
10745 }
10746
10747 return rc;
10748}
10749
10750
10751/**
10752 * Pops a qword from the stack, using a temporary stack pointer.
10753 *
10754 * @returns Strict VBox status code.
10755 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10756 * @param pu64Value Where to store the popped value.
10757 * @param pTmpRsp Pointer to the temporary stack pointer.
10758 */
10759IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10760{
10761 /* Increment the stack pointer. */
10762 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10763 RTUINT64U NewRsp = *pTmpRsp;
10764 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10765
10766 /* Read the qword the lazy way. */
10767 uint64_t const *pu64Src;
10768 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10769 if (rcStrict == VINF_SUCCESS)
10770 {
10771 *pu64Value = *pu64Src;
10772 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10773
10774 /* Commit the new RSP value. */
10775 if (rcStrict == VINF_SUCCESS)
10776 *pTmpRsp = NewRsp;
10777 }
10778
10779 return rcStrict;
10780}
10781
10782
10783/**
10784 * Begin a special stack push (used by interrupts, exceptions and such).
10785 *
10786 * This will raise \#SS or \#PF if appropriate.
10787 *
10788 * @returns Strict VBox status code.
10789 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10790 * @param cbMem The number of bytes to push onto the stack.
10791 * @param ppvMem Where to return the pointer to the stack memory.
10792 * As with the other memory functions this could be
10793 * direct access or bounce buffered access, so
10794 * don't commit the register until the commit call
10795 * succeeds.
10796 * @param puNewRsp Where to return the new RSP value. This must be
10797 * passed unchanged to
10798 * iemMemStackPushCommitSpecial().
10799 */
10800IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10801{
10802 Assert(cbMem < UINT8_MAX);
10803 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10804 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10805 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10806}
10807
10808
10809/**
10810 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10811 *
10812 * This will update the rSP.
10813 *
10814 * @returns Strict VBox status code.
10815 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10816 * @param pvMem The pointer returned by
10817 * iemMemStackPushBeginSpecial().
10818 * @param uNewRsp The new RSP value returned by
10819 * iemMemStackPushBeginSpecial().
10820 */
10821IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10822{
10823 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10824 if (rcStrict == VINF_SUCCESS)
10825 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10826 return rcStrict;
10827}
10828
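/*
 * Illustrative usage sketch only (never compiled, error handling trimmed):
 * the two-phase protocol implemented by iemMemStackPushBeginSpecial and
 * iemMemStackPushCommitSpecial above.  The 6 byte frame size is just an
 * example (e.g. a real mode IP/CS/FLAGS exception frame).
 */
#if 0
    uint64_t     uNewRsp;
    void        *pvStackFrame;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, &pvStackFrame, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        /* ... fill in the mapped frame via pvStackFrame ... */
        rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvStackFrame, uNewRsp); /* commits memory + RSP */
    }
#endif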
10829
10830/**
10831 * Begin a special stack pop (used by iret, retf and such).
10832 *
10833 * This will raise \#SS or \#PF if appropriate.
10834 *
10835 * @returns Strict VBox status code.
10836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10837 * @param cbMem The number of bytes to pop from the stack.
10838 * @param ppvMem Where to return the pointer to the stack memory.
10839 * @param puNewRsp Where to return the new RSP value. This must be
10840 * assigned to CPUMCTX::rsp manually some time
10841 * after iemMemStackPopDoneSpecial() has been
10842 * called.
10843 */
10844IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10845{
10846 Assert(cbMem < UINT8_MAX);
10847 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10848 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10849 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10850}
10851
10852
10853/**
10854 * Continue a special stack pop (used by iret and retf).
10855 *
10856 * This will raise \#SS or \#PF if appropriate.
10857 *
10858 * @returns Strict VBox status code.
10859 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10860 * @param cbMem The number of bytes to pop from the stack.
10861 * @param ppvMem Where to return the pointer to the stack memory.
10862 * @param puNewRsp Where to return the new RSP value. This must be
10863 * assigned to CPUMCTX::rsp manually some time
10864 * after iemMemStackPopDoneSpecial() has been
10865 * called.
10866 */
10867IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10868{
10869 Assert(cbMem < UINT8_MAX);
10870 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10871 RTUINT64U NewRsp;
10872 NewRsp.u = *puNewRsp;
10873 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10874 *puNewRsp = NewRsp.u;
10875 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10876}
10877
10878
10879/**
10880 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10881 * iemMemStackPopContinueSpecial).
10882 *
10883 * The caller will manually commit the rSP.
10884 *
10885 * @returns Strict VBox status code.
10886 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10887 * @param pvMem The pointer returned by
10888 * iemMemStackPopBeginSpecial() or
10889 * iemMemStackPopContinueSpecial().
10890 */
10891IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10892{
10893 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10894}
10895
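/*
 * Illustrative usage sketch only (never compiled, error handling trimmed):
 * the pop side of the special stack protocol above.  The 12 byte frame size
 * is just an example (e.g. a 32-bit EIP/CS/EFLAGS iret frame); note that the
 * caller commits RSP manually after iemMemStackPopDoneSpecial succeeds.
 */
#if 0
    uint64_t        uNewRsp;
    void const     *pvFrame;
    VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, &pvFrame, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        /* ... read EIP, CS and EFLAGS from pvFrame ... */
        rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvFrame);
        if (rcStrict == VINF_SUCCESS)
            IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
    }
#endif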
10896
10897/**
10898 * Fetches a system table byte.
10899 *
10900 * @returns Strict VBox status code.
10901 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10902 * @param pbDst Where to return the byte.
10903 * @param iSegReg The index of the segment register to use for
10904 * this access. The base and limits are checked.
10905 * @param GCPtrMem The address of the guest memory.
10906 */
10907IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10908{
10909 /* The lazy approach for now... */
10910 uint8_t const *pbSrc;
10911 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10912 if (rc == VINF_SUCCESS)
10913 {
10914 *pbDst = *pbSrc;
10915 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10916 }
10917 return rc;
10918}
10919
10920
10921/**
10922 * Fetches a system table word.
10923 *
10924 * @returns Strict VBox status code.
10925 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10926 * @param pu16Dst Where to return the word.
10927 * @param iSegReg The index of the segment register to use for
10928 * this access. The base and limits are checked.
10929 * @param GCPtrMem The address of the guest memory.
10930 */
10931IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10932{
10933 /* The lazy approach for now... */
10934 uint16_t const *pu16Src;
10935 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10936 if (rc == VINF_SUCCESS)
10937 {
10938 *pu16Dst = *pu16Src;
10939 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10940 }
10941 return rc;
10942}
10943
10944
10945/**
10946 * Fetches a system table dword.
10947 *
10948 * @returns Strict VBox status code.
10949 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10950 * @param pu32Dst Where to return the dword.
10951 * @param iSegReg The index of the segment register to use for
10952 * this access. The base and limits are checked.
10953 * @param GCPtrMem The address of the guest memory.
10954 */
10955IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10956{
10957 /* The lazy approach for now... */
10958 uint32_t const *pu32Src;
10959 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10960 if (rc == VINF_SUCCESS)
10961 {
10962 *pu32Dst = *pu32Src;
10963 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10964 }
10965 return rc;
10966}
10967
10968
10969/**
10970 * Fetches a system table qword.
10971 *
10972 * @returns Strict VBox status code.
10973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10974 * @param pu64Dst Where to return the qword.
10975 * @param iSegReg The index of the segment register to use for
10976 * this access. The base and limits are checked.
10977 * @param GCPtrMem The address of the guest memory.
10978 */
10979IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10980{
10981 /* The lazy approach for now... */
10982 uint64_t const *pu64Src;
10983 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10984 if (rc == VINF_SUCCESS)
10985 {
10986 *pu64Dst = *pu64Src;
10987 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10988 }
10989 return rc;
10990}
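
/* A usage sketch (not compiled): the system table fetchers above are meant for
   reading descriptor table entries and similar structures; passing UINT8_MAX
   as the segment index makes it a flat access, as done by
   iemMemFetchSelDescWithErr() below.  GCPtrEntry is an invented name. */
#if 0
    uint64_t u64LowQword;
    VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &u64LowQword, UINT8_MAX, GCPtrEntry);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
#endif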
10991
10992
10993/**
10994 * Fetches a descriptor table entry with a caller-specified error code.
10995 *
10996 * @returns Strict VBox status code.
10997 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10998 * @param pDesc Where to return the descriptor table entry.
10999 * @param uSel The selector which table entry to fetch.
11000 * @param uXcpt The exception to raise on table lookup error.
11001 * @param uErrorCode The error code associated with the exception.
11002 */
11003IEM_STATIC VBOXSTRICTRC
11004iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
11005{
11006 AssertPtr(pDesc);
11007 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11008
11009 /** @todo did the 286 require all 8 bytes to be accessible? */
11010 /*
11011 * Get the selector table base and check bounds.
11012 */
11013 RTGCPTR GCPtrBase;
11014 if (uSel & X86_SEL_LDT)
11015 {
11016 if ( !pCtx->ldtr.Attr.n.u1Present
11017 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
11018 {
11019 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
11020 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
11021 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
11022 uErrorCode, 0);
11023 }
11024
11025 Assert(pCtx->ldtr.Attr.n.u1Present);
11026 GCPtrBase = pCtx->ldtr.u64Base;
11027 }
11028 else
11029 {
11030 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
11031 {
11032 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
11033 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
11034 uErrorCode, 0);
11035 }
11036 GCPtrBase = pCtx->gdtr.pGdt;
11037 }
11038
11039 /*
11040 * Read the legacy descriptor and maybe the long mode extensions if
11041 * required.
11042 */
11043 VBOXSTRICTRC rcStrict;
11044 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
11045 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
11046 else
11047 {
11048 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
11049 if (rcStrict == VINF_SUCCESS)
11050 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
11051 if (rcStrict == VINF_SUCCESS)
11052 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
11053 if (rcStrict == VINF_SUCCESS)
11054 pDesc->Legacy.au16[3] = 0;
11055 else
11056 return rcStrict;
11057 }
11058
11059 if (rcStrict == VINF_SUCCESS)
11060 {
11061 if ( !IEM_IS_LONG_MODE(pVCpu)
11062 || pDesc->Legacy.Gen.u1DescType)
11063 pDesc->Long.au64[1] = 0;
11064 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
11065 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
11066 else
11067 {
11068 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
11069 /** @todo is this the right exception? */
11070 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
11071 }
11072 }
11073 return rcStrict;
11074}
11075
11076
11077/**
11078 * Fetches a descriptor table entry.
11079 *
11080 * @returns Strict VBox status code.
11081 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11082 * @param pDesc Where to return the descriptor table entry.
11083 * @param uSel The selector which table entry to fetch.
11084 * @param uXcpt The exception to raise on table lookup error.
11085 */
11086IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11087{
11088 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11089}
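
/* A usage sketch (not compiled): fetching and partially validating a code
   segment descriptor, along the lines of what a far jump/call implementation
   does.  The checks are deliberately incomplete, uNewCs is an invented name,
   and the iemRaise*BySelector exception helpers used here are assumed to be
   the ones found elsewhere in this file. */
#if 0
    IEMSELDESC DescCs;
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &DescCs, uNewCs, X86_XCPT_GP);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    if (!DescCs.Legacy.Gen.u1DescType)   /* System descriptors need different treatment. */
        return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
    if (!DescCs.Legacy.Gen.u1Present)    /* Must be present. */
        return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
#endif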
11090
11091
11092/**
11093 * Fakes a long mode stack selector for SS = 0.
11094 *
11095 * @param pDescSs Where to return the fake stack descriptor.
11096 * @param uDpl The DPL we want.
11097 */
11098IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11099{
11100 pDescSs->Long.au64[0] = 0;
11101 pDescSs->Long.au64[1] = 0;
11102 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11103 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11104 pDescSs->Long.Gen.u2Dpl = uDpl;
11105 pDescSs->Long.Gen.u1Present = 1;
11106 pDescSs->Long.Gen.u1Long = 1;
11107}
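
/* A usage sketch (not compiled): a long mode return/iret style path can
   substitute a fake descriptor when the new SS is a null selector instead of
   fetching one from the GDT/LDT.  uNewSs is an invented name and the
   surrounding validation is omitted. */
#if 0
    IEMSELDESC DescSs;
    if (   IEM_IS_LONG_MODE(pVCpu)
        && (uNewSs & X86_SEL_MASK_OFF_RPL) == 0)
        iemMemFakeStackSelDesc(&DescSs, (uNewSs & X86_SEL_RPL));
    else
    {
        VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &DescSs, uNewSs, X86_XCPT_GP);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
    }
#endif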
11108
11109
11110/**
11111 * Marks the selector descriptor as accessed (only non-system descriptors).
11112 *
11113 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11114 * will therefore skip the limit checks.
11115 *
11116 * @returns Strict VBox status code.
11117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11118 * @param uSel The selector.
11119 */
11120IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11121{
11122 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11123
11124 /*
11125 * Get the selector table base and calculate the entry address.
11126 */
11127 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11128 ? pCtx->ldtr.u64Base
11129 : pCtx->gdtr.pGdt;
11130 GCPtr += uSel & X86_SEL_MASK;
11131
11132 /*
11133 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11134     * ugly stuff to avoid this. This also ensures the access is atomic and
11135     * more or less removes any question about 8-bit vs 32-bit accesses.
11136 */
11137 VBOXSTRICTRC rcStrict;
11138 uint32_t volatile *pu32;
11139 if ((GCPtr & 3) == 0)
11140 {
11141        /* The normal case: map the dword containing the accessed bit (descriptor bit 40). */
11142 GCPtr += 2 + 2;
11143 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11144 if (rcStrict != VINF_SUCCESS)
11145 return rcStrict;
11146        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11147 }
11148 else
11149 {
11150 /* The misaligned GDT/LDT case, map the whole thing. */
11151 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11152 if (rcStrict != VINF_SUCCESS)
11153 return rcStrict;
11154 switch ((uintptr_t)pu32 & 3)
11155 {
11156 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11157 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11158 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11159 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11160 }
11161 }
11162
11163 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11164}
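
/* A usage sketch (not compiled): the usual sequence when loading a segment
   register is fetch -> validate -> mark accessed, keeping the local descriptor
   copy in sync with the bit just set in memory.  Desc and uSel are invented
   names and the validation step is omitted. */
#if 0
    if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
    {
        VBOXSTRICTRC rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
        Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
    }
#endif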
11165
11166/** @} */
11167
11168
11169/*
11170 * Include the C/C++ implementation of the instructions.
11171 */
11172#include "IEMAllCImpl.cpp.h"
11173
11174
11175
11176/** @name "Microcode" macros.
11177 *
11178 * The idea is that we should be able to use the same code to interpret
11179 * instructions as well as to feed a recompiler. Thus this obfuscation.
11180 *
11181 * @{
11182 */
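
/* An illustrative sketch (not compiled) of how an instruction body in the
   IEMAllInstructions*.cpp.h decoders strings these IEM_MC_* macros together.
   The choice of a 'push ax'-style body and the decoder function it would live
   in are assumptions made purely for illustration. */
#if 0
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint16_t, u16Value);
    IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
    IEM_MC_PUSH_U16(u16Value);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
#endif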
11183#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11184#define IEM_MC_END() }
11185#define IEM_MC_PAUSE() do {} while (0)
11186#define IEM_MC_CONTINUE() do {} while (0)
11187
11188/** Internal macro. */
11189#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11190 do \
11191 { \
11192 VBOXSTRICTRC rcStrict2 = a_Expr; \
11193 if (rcStrict2 != VINF_SUCCESS) \
11194 return rcStrict2; \
11195 } while (0)
11196
11197
11198#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11199#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11200#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11201#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11202#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11203#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11204#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11205#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11206#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11207 do { \
11208 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11209 return iemRaiseDeviceNotAvailable(pVCpu); \
11210 } while (0)
11211#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11212 do { \
11213 if (((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11214 return iemRaiseDeviceNotAvailable(pVCpu); \
11215 } while (0)
11216#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11217 do { \
11218 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11219 return iemRaiseMathFault(pVCpu); \
11220 } while (0)
11221#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11222 do { \
11223 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11224 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11225 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11226 return iemRaiseUndefinedOpcode(pVCpu); \
11227 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11228 return iemRaiseDeviceNotAvailable(pVCpu); \
11229 } while (0)
11230#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11231 do { \
11232 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11233 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11234 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11235 return iemRaiseUndefinedOpcode(pVCpu); \
11236 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11237 return iemRaiseDeviceNotAvailable(pVCpu); \
11238 } while (0)
11239#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11240 do { \
11241 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11242 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11243 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11244 return iemRaiseUndefinedOpcode(pVCpu); \
11245 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11246 return iemRaiseDeviceNotAvailable(pVCpu); \
11247 } while (0)
11248#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11249 do { \
11250 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11251 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11252 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11253 return iemRaiseUndefinedOpcode(pVCpu); \
11254 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11255 return iemRaiseDeviceNotAvailable(pVCpu); \
11256 } while (0)
11257#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11258 do { \
11259 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11260 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11261 return iemRaiseUndefinedOpcode(pVCpu); \
11262 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11263 return iemRaiseDeviceNotAvailable(pVCpu); \
11264 } while (0)
11265#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11266 do { \
11267 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11268 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11269 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11270 return iemRaiseUndefinedOpcode(pVCpu); \
11271 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11272 return iemRaiseDeviceNotAvailable(pVCpu); \
11273 } while (0)
11274#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11275 do { \
11276 if (pVCpu->iem.s.uCpl != 0) \
11277 return iemRaiseGeneralProtectionFault0(pVCpu); \
11278 } while (0)
11279#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11280 do { \
11281 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11282 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11283 } while (0)
11284
11285
11286#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11287#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11288#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11289#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11290#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11291#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11292#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11293 uint32_t a_Name; \
11294 uint32_t *a_pName = &a_Name
11295#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11296 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
11297
11298#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11299#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11300
11301#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11302#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11303#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11304#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11305#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11306#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11307#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11308#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11309#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11310#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11311#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11312#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11313#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11314#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11315#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11316#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11317#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11318#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11319#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11320#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11321#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11322#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11323#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11324#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11325#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11326#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11327#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11328#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11329#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11330/** @note Not for IOPL or IF testing or modification. */
11331#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11332#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11333#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
11334#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
11335
11336#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11337#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11338#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11339#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11340#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11341#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11342#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11343#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11344#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11345#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11346#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11347 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11348
11349#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11350#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11351/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11352 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11353#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11354#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11355/** @note Not for IOPL or IF testing or modification. */
11356#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11357
11358#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11359#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11360#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11361 do { \
11362 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11363 *pu32Reg += (a_u32Value); \
11364        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11365 } while (0)
11366#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11367
11368#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11369#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11370#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11371 do { \
11372 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11373 *pu32Reg -= (a_u32Value); \
11374        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11375 } while (0)
11376#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11377#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11378
11379#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11380#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11381#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11382#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11383#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11384#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11385#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11386
11387#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11388#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11389#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11390#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11391
11392#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11393#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11394#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11395
11396#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11397#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11398#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11399
11400#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11401#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11402#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11403
11404#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11405#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11406#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11407
11408#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11409
11410#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11411
11412#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11413#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11414#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11415 do { \
11416 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11417 *pu32Reg &= (a_u32Value); \
11418        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11419 } while (0)
11420#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11421
11422#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11423#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11424#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11425 do { \
11426 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11427 *pu32Reg |= (a_u32Value); \
11428        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11429 } while (0)
11430#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11431
11432
11433/** @note Not for IOPL or IF modification. */
11434#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
11435/** @note Not for IOPL or IF modification. */
11436#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
11437/** @note Not for IOPL or IF modification. */
11438#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
11439
11440#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11441
11442/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11443#define IEM_MC_FPU_TO_MMX_MODE() do { \
11444 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11445 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0xff; \
11446 } while (0)
11447
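/* A usage sketch (not compiled): an MMX register-to-register move could be
   expressed with the MREG macros below after switching the x87 unit into MMX
   mode; the register indexes are picked arbitrarily for the example. */
#if 0
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint64_t, u64Tmp);
    IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
    IEM_MC_FPU_TO_MMX_MODE();
    IEM_MC_FETCH_MREG_U64(u64Tmp, 1);
    IEM_MC_STORE_MREG_U64(0, u64Tmp);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
#endif
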
11448#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11449 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11450#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11451 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11452#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11453 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11454 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11455 } while (0)
11456#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11457 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11458 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11459 } while (0)
11460#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11461 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11462#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11463 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11464#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11465 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11466
11467#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11468 do { (a_u128Value).au64[0] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11469 (a_u128Value).au64[1] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11470 } while (0)
11471#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11472 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11473#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11474 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11475#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11476 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11477#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11478 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11479 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11480 } while (0)
11481#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11482 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11483#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11484 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11485 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11486 } while (0)
11487#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11488 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11489#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11490 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11491 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11492 } while (0)
11493#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11494 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11495#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11496 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11497#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11498 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11499#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11500 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11501#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11502 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11503 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11504 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11505 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11506 } while (0)
11507
11508#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11509 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11510 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11511 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11512 } while (0)
11513#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11514 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11515 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11516 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11517 } while (0)
11518#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11519 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11520 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11521 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11522 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11523 } while (0)
11524#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11525 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11526 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11527 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11528 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11529 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11530 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11531 } while (0)
11532
11533#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11534#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11535 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11536 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11537 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11538 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11539 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11540 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11541 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11542 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11543 } while (0)
11544#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11545 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11546 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11547 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11548 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11549 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11550 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11551 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11552 } while (0)
11553#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11554 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11555 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11556 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11557 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11558 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11559 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11560 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11561 } while (0)
11562#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11563 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11564 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11565 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11566 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11567 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11568 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11569 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11570 } while (0)
11571
11572#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11573 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11574#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11575 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11576#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11577 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11578#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11579 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11580 uintptr_t const iYRegTmp = (a_iYReg); \
11581 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11582 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11583 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11584 } while (0)
11585
11586#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11587 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11588 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11589 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11590 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11591 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11592 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11593 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11594 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11595 } while (0)
11596#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11597 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11598 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11599 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11600 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11601 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11602 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11603 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11604 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11605 } while (0)
11606
11607#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11608 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11609 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11610 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11611 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11612 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11613 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11614 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11615 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11616 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11617 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11618 } while (0)
11619#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11620 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11621 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11622 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11623 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11624 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11625 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11626 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11627 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11628 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11629 } while (0)
11630#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11631 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11632 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11633 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11634 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11635 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11636 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11637 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11638 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11639 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11640 } while (0)
11641#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11642 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11643 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11644 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11645 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11646 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11647 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11648 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11649 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11650 } while (0)
11651
11652#ifndef IEM_WITH_SETJMP
11653# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11654 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11655# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11656 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11657# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11658 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11659#else
11660# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11661 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11662# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11663 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11664# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11665 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11666#endif
11667
11668#ifndef IEM_WITH_SETJMP
11669# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11670 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11671# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11672 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11673# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11674 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11675#else
11676# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11677 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11678# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11679 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11680# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11681 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11682#endif
11683
11684#ifndef IEM_WITH_SETJMP
11685# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11686 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11687# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11688 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11689# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11690 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11691#else
11692# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11693 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11694# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11695 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11696# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11697 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11698#endif
11699
11700#ifdef SOME_UNUSED_FUNCTION
11701# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11702 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11703#endif
11704
11705#ifndef IEM_WITH_SETJMP
11706# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11707 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11708# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11709 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11710# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11711 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11712# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11713 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11714#else
11715# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11716 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11717# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11718 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11719# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11720 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11721# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11722 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11723#endif
11724
11725#ifndef IEM_WITH_SETJMP
11726# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11727 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11728# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11729 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11730# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11731 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11732#else
11733# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11734 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11735# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11736 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11737# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11738 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11739#endif
11740
11741#ifndef IEM_WITH_SETJMP
11742# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11743 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11744# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11745 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11746#else
11747# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11748 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11749# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11750 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11751#endif
11752
11753#ifndef IEM_WITH_SETJMP
11754# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11755 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11756# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11757 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11758#else
11759# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11760 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11761# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11762 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11763#endif
11764
11765
11766
11767#ifndef IEM_WITH_SETJMP
11768# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11769 do { \
11770 uint8_t u8Tmp; \
11771 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11772 (a_u16Dst) = u8Tmp; \
11773 } while (0)
11774# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11775 do { \
11776 uint8_t u8Tmp; \
11777 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11778 (a_u32Dst) = u8Tmp; \
11779 } while (0)
11780# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11781 do { \
11782 uint8_t u8Tmp; \
11783 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11784 (a_u64Dst) = u8Tmp; \
11785 } while (0)
11786# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11787 do { \
11788 uint16_t u16Tmp; \
11789 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11790 (a_u32Dst) = u16Tmp; \
11791 } while (0)
11792# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11793 do { \
11794 uint16_t u16Tmp; \
11795 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11796 (a_u64Dst) = u16Tmp; \
11797 } while (0)
11798# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11799 do { \
11800 uint32_t u32Tmp; \
11801 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11802 (a_u64Dst) = u32Tmp; \
11803 } while (0)
11804#else /* IEM_WITH_SETJMP */
11805# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11806 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11807# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11808 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11809# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11810 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11811# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11812 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11813# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11814 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11815# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11816 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11817#endif /* IEM_WITH_SETJMP */
11818
11819#ifndef IEM_WITH_SETJMP
11820# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11821 do { \
11822 uint8_t u8Tmp; \
11823 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11824 (a_u16Dst) = (int8_t)u8Tmp; \
11825 } while (0)
11826# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11827 do { \
11828 uint8_t u8Tmp; \
11829 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11830 (a_u32Dst) = (int8_t)u8Tmp; \
11831 } while (0)
11832# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11833 do { \
11834 uint8_t u8Tmp; \
11835 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11836 (a_u64Dst) = (int8_t)u8Tmp; \
11837 } while (0)
11838# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11839 do { \
11840 uint16_t u16Tmp; \
11841 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11842 (a_u32Dst) = (int16_t)u16Tmp; \
11843 } while (0)
11844# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11845 do { \
11846 uint16_t u16Tmp; \
11847 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11848 (a_u64Dst) = (int16_t)u16Tmp; \
11849 } while (0)
11850# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11851 do { \
11852 uint32_t u32Tmp; \
11853 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11854 (a_u64Dst) = (int32_t)u32Tmp; \
11855 } while (0)
11856#else /* IEM_WITH_SETJMP */
11857# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11858 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11859# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11860 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11861# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11862 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11863# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11864 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11865# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11866 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11867# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11868 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11869#endif /* IEM_WITH_SETJMP */
11870
11871#ifndef IEM_WITH_SETJMP
11872# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11873 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11874# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11875 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11876# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11877 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11878# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11879 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11880#else
11881# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11882 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11883# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11884 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11885# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11886 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11887# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11888 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11889#endif
11890
11891#ifndef IEM_WITH_SETJMP
11892# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11893 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11894# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11895 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11896# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11897 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11898# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11899 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11900#else
11901# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11902 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11903# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11904 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11905# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11906 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11907# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11908 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11909#endif
11910
11911#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11912#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11913#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11914#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11915#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11916#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11917#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11918 do { \
11919 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11920 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11921 } while (0)
11922
11923#ifndef IEM_WITH_SETJMP
11924# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11925 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11926# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11927 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11928#else
11929# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11930 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11931# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11932 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11933#endif
11934
11935#ifndef IEM_WITH_SETJMP
11936# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11937 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11938# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11939 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11940#else
11941# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11942 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11943# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11944 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11945#endif
11946
11947
11948#define IEM_MC_PUSH_U16(a_u16Value) \
11949 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11950#define IEM_MC_PUSH_U32(a_u32Value) \
11951 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11952#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11953 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11954#define IEM_MC_PUSH_U64(a_u64Value) \
11955 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11956
11957#define IEM_MC_POP_U16(a_pu16Value) \
11958 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11959#define IEM_MC_POP_U32(a_pu32Value) \
11960 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11961#define IEM_MC_POP_U64(a_pu64Value) \
11962 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11963
11964/** Maps guest memory for direct or bounce buffered access.
11965 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11966 * @remarks May return.
11967 */
11968#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11969 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11970
11971/** Maps guest memory for direct or bounce buffered access.
11972 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11973 * @remarks May return.
11974 */
11975#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11976 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11977
11978/** Commits the memory and unmaps the guest memory.
11979 * @remarks May return.
11980 */
11981#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11982 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11983
11984/** Commits the memory and unmaps the guest memory, unless the FPU status word
11985 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
11986 * that would cause FLD not to store.
11987 *
11988 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11989 * store, while \#P will not.
11990 *
11991 * @remarks May in theory return - for now.
11992 */
11993#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11994 do { \
11995 if ( !(a_u16FSW & X86_FSW_ES) \
11996 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11997 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11998 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11999 } while (0)
12000
12001/** Calculate effective address from R/M. */
12002#ifndef IEM_WITH_SETJMP
12003# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12004 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
12005#else
12006# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12007 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
12008#endif
12009
12010#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
12011#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
12012#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
12013#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
12014#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
12015#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
12016#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
12017
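/*
 * Illustrative sketch, added for documentation purposes only (not part of the
 * original sources): a typical read-modify-write MC block combining
 * IEM_MC_CALC_RM_EFF_ADDR, IEM_MC_MEM_MAP, IEM_MC_CALL_VOID_AIMPL_3 and
 * IEM_MC_MEM_COMMIT_AND_UNMAP for an "op r/m8, r8" style memory destination.
 * IEM_MC_BEGIN/END, IEM_MC_ARG*, IEM_MC_LOCAL, IEM_MC_FETCH_GREG_U8,
 * IEM_MC_FETCH_EFLAGS, IEM_MC_COMMIT_EFLAGS and IEM_MC_ADVANCE_RIP are assumed
 * from elsewhere in this file; pfnU8 stands in for a real assembly worker.
 */
#if 0 /* example only */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *,       pu8Dst,          0);
        IEM_MC_ARG(uint8_t,         u8Src,           1);
        IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pfnU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
#endif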
12018/**
12019 * Defers the rest of the instruction emulation to a C implementation routine
12020 * and returns, only taking the standard parameters.
12021 *
12022 * @param a_pfnCImpl The pointer to the C routine.
12023 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12024 */
12025#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12026
12027/**
12028 * Defers the rest of instruction emulation to a C implementation routine and
12029 * returns, taking one argument in addition to the standard ones.
12030 *
12031 * @param a_pfnCImpl The pointer to the C routine.
12032 * @param a0 The argument.
12033 */
12034#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12035
12036/**
12037 * Defers the rest of the instruction emulation to a C implementation routine
12038 * and returns, taking two arguments in addition to the standard ones.
12039 *
12040 * @param a_pfnCImpl The pointer to the C routine.
12041 * @param a0 The first extra argument.
12042 * @param a1 The second extra argument.
12043 */
12044#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12045
12046/**
12047 * Defers the rest of the instruction emulation to a C implementation routine
12048 * and returns, taking three arguments in addition to the standard ones.
12049 *
12050 * @param a_pfnCImpl The pointer to the C routine.
12051 * @param a0 The first extra argument.
12052 * @param a1 The second extra argument.
12053 * @param a2 The third extra argument.
12054 */
12055#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12056
12057/**
12058 * Defers the rest of the instruction emulation to a C implementation routine
12059 * and returns, taking four arguments in addition to the standard ones.
12060 *
12061 * @param a_pfnCImpl The pointer to the C routine.
12062 * @param a0 The first extra argument.
12063 * @param a1 The second extra argument.
12064 * @param a2 The third extra argument.
12065 * @param a3 The fourth extra argument.
12066 */
12067#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12068
12069/**
12070 * Defers the rest of the instruction emulation to a C implementation routine
12071 * and returns, taking five arguments in addition to the standard ones.
12072 *
12073 * @param a_pfnCImpl The pointer to the C routine.
12074 * @param a0 The first extra argument.
12075 * @param a1 The second extra argument.
12076 * @param a2 The third extra argument.
12077 * @param a3 The fourth extra argument.
12078 * @param a4 The fifth extra argument.
12079 */
12080#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
12081
12082/**
12083 * Defers the entire instruction emulation to a C implementation routine and
12084 * returns, only taking the standard parameters.
12085 *
12086 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12087 *
12088 * @param a_pfnCImpl The pointer to the C routine.
12089 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12090 */
12091#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12092
12093/**
12094 * Defers the entire instruction emulation to a C implementation routine and
12095 * returns, taking one argument in addition to the standard ones.
12096 *
12097 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12098 *
12099 * @param a_pfnCImpl The pointer to the C routine.
12100 * @param a0 The argument.
12101 */
12102#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12103
12104/**
12105 * Defers the entire instruction emulation to a C implementation routine and
12106 * returns, taking two arguments in addition to the standard ones.
12107 *
12108 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12109 *
12110 * @param a_pfnCImpl The pointer to the C routine.
12111 * @param a0 The first extra argument.
12112 * @param a1 The second extra argument.
12113 */
12114#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12115
12116/**
12117 * Defers the entire instruction emulation to a C implementation routine and
12118 * returns, taking three arguments in addition to the standard ones.
12119 *
12120 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12121 *
12122 * @param a_pfnCImpl The pointer to the C routine.
12123 * @param a0 The first extra argument.
12124 * @param a1 The second extra argument.
12125 * @param a2 The third extra argument.
12126 */
12127#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12128
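/*
 * Illustrative sketch, added for documentation purposes only (not part of the
 * original sources): the two common ways of handing an instruction over to a
 * C implementation worker.  FNIEMOP_DEF, IEM_MC_BEGIN/END, IEM_MC_ARG,
 * IEM_MC_FETCH_GREG_U16 and IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX are
 * assumed from elsewhere in this file; the worker names are examples.
 */
#if 0 /* example only */
/* Entire instruction deferred, no MC block around it: */
FNIEMOP_DEF(iemOp_hlt)
{
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
}

/* Only the tail of the emulation deferred, after operand decoding: */
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lldt, u16Sel);
        IEM_MC_END();
#endif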
12129/**
12130 * Calls an FPU assembly implementation taking one visible argument.
12131 *
12132 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12133 * @param a0 The first extra argument.
12134 */
12135#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12136 do { \
12137 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
12138 } while (0)
12139
12140/**
12141 * Calls an FPU assembly implementation taking two visible arguments.
12142 *
12143 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12144 * @param a0 The first extra argument.
12145 * @param a1 The second extra argument.
12146 */
12147#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12148 do { \
12149 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12150 } while (0)
12151
12152/**
12153 * Calls an FPU assembly implementation taking three visible arguments.
12154 *
12155 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12156 * @param a0 The first extra argument.
12157 * @param a1 The second extra argument.
12158 * @param a2 The third extra argument.
12159 */
12160#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12161 do { \
12162 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12163 } while (0)
12164
12165#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12166 do { \
12167 (a_FpuData).FSW = (a_FSW); \
12168 (a_FpuData).r80Result = *(a_pr80Value); \
12169 } while (0)
12170
12171/** Pushes FPU result onto the stack. */
12172#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12173 iemFpuPushResult(pVCpu, &a_FpuData)
12174/** Pushes FPU result onto the stack and sets the FPUDP. */
12175#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12176 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12177
12178/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
12179#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12180 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12181
12182/** Stores FPU result in a stack register. */
12183#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12184 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12185/** Stores FPU result in a stack register and pops the stack. */
12186#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12187 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12188/** Stores FPU result in a stack register and sets the FPUDP. */
12189#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12190 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12191/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12192 * stack. */
12193#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12194 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12195
12196/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12197#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12198 iemFpuUpdateOpcodeAndIp(pVCpu)
12199/** Free a stack register (for FFREE and FFREEP). */
12200#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12201 iemFpuStackFree(pVCpu, a_iStReg)
12202/** Increment the FPU stack pointer. */
12203#define IEM_MC_FPU_STACK_INC_TOP() \
12204 iemFpuStackIncTop(pVCpu)
12205/** Decrement the FPU stack pointer. */
12206#define IEM_MC_FPU_STACK_DEC_TOP() \
12207 iemFpuStackDecTop(pVCpu)
12208
12209/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12210#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12211 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12212/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12213#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12214 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12215/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12216#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12217 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12218/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12219#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12220 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12221/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12222 * stack. */
12223#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12224 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12225/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12226#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12227 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12228
12229/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12230#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12231 iemFpuStackUnderflow(pVCpu, a_iStDst)
12232/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12233 * stack. */
12234#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12235 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12236/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12237 * FPUDS. */
12238#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12239 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12240/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12241 * FPUDS. Pops stack. */
12242#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12243 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12244/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12245 * stack twice. */
12246#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12247 iemFpuStackUnderflowThenPopPop(pVCpu)
12248/** Raises a FPU stack underflow exception for an instruction pushing a result
12249 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12250#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12251 iemFpuStackPushUnderflow(pVCpu)
12252/** Raises a FPU stack underflow exception for an instruction pushing a result
12253 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12254#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12255 iemFpuStackPushUnderflowTwo(pVCpu)
12256
12257/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12258 * FPUIP, FPUCS and FOP. */
12259#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12260 iemFpuStackPushOverflow(pVCpu)
12261/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12262 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12263#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12264 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12265/** Prepares for using the FPU state.
12266 * Ensures that we can use the host FPU in the current context (RC+R0).
12267 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12268#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12269/** Actualizes the guest FPU state so it can be accessed read-only fashion. */
12270#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12271/** Actualizes the guest FPU state so it can be accessed and modified. */
12272#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12273
12274/** Prepares for using the SSE state.
12275 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12276 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12277#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12278/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12279#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12280/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12281#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12282
12283/** Prepares for using the AVX state.
12284 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12285 * Ensures the guest AVX state in the CPUMCTX is up to date.
12286 * @note This will include the AVX512 state too when support for it is added
12287 * due to the zero extending feature of VEX instructions. */
12288#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12289/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12290#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12291/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12292#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12293
12294/**
12295 * Calls an MMX assembly implementation taking two visible arguments.
12296 *
12297 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12298 * @param a0 The first extra argument.
12299 * @param a1 The second extra argument.
12300 */
12301#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12302 do { \
12303 IEM_MC_PREPARE_FPU_USAGE(); \
12304 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12305 } while (0)
12306
12307/**
12308 * Calls an MMX assembly implementation taking three visible arguments.
12309 *
12310 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12311 * @param a0 The first extra argument.
12312 * @param a1 The second extra argument.
12313 * @param a2 The third extra argument.
12314 */
12315#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12316 do { \
12317 IEM_MC_PREPARE_FPU_USAGE(); \
12318 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12319 } while (0)
12320
12321
12322/**
12323 * Calls an SSE assembly implementation taking two visible arguments.
12324 *
12325 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12326 * @param a0 The first extra argument.
12327 * @param a1 The second extra argument.
12328 */
12329#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12330 do { \
12331 IEM_MC_PREPARE_SSE_USAGE(); \
12332 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12333 } while (0)
12334
12335/**
12336 * Calls an SSE assembly implementation taking three visible arguments.
12337 *
12338 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12339 * @param a0 The first extra argument.
12340 * @param a1 The second extra argument.
12341 * @param a2 The third extra argument.
12342 */
12343#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12344 do { \
12345 IEM_MC_PREPARE_SSE_USAGE(); \
12346 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12347 } while (0)
12348
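/*
 * Illustrative sketch, added for documentation purposes only (not part of the
 * original sources): how IEM_MC_PREPARE_SSE_USAGE and IEM_MC_CALL_SSE_AIMPL_2
 * are typically combined for a register-to-register 128-bit operation.
 * IEM_MC_BEGIN/END, IEM_MC_ARG, IEM_MC_REF_XREG_U128* and
 * IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT are assumed from elsewhere in this
 * file; the exact operand typedefs and worker name may differ by revision.
 */
#if 0 /* example only */
        IEM_MC_BEGIN(2, 0);
        IEM_MC_ARG(PRTUINT128U,  pDst, 0);
        IEM_MC_ARG(PCRTUINT128U, pSrc, 1);
        IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
        IEM_MC_PREPARE_SSE_USAGE();
        IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
        IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
        IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_pxor_u128, pDst, pSrc);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
#endif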
12349
12350/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12351 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12352#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12353 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState), 0)
12354
12355/**
12356 * Calls an AVX assembly implementation taking two visible arguments.
12357 *
12358 * There is one implicit zero'th argument, a pointer to the extended state.
12359 *
12360 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12361 * @param a1 The first extra argument.
12362 * @param a2 The second extra argument.
12363 */
12364#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12365 do { \
12366 IEM_MC_PREPARE_AVX_USAGE(); \
12367 a_pfnAImpl(pXState, (a1), (a2)); \
12368 } while (0)
12369
12370/**
12371 * Calls an AVX assembly implementation taking three visible arguments.
12372 *
12373 * There is one implicit zero'th argument, a pointer to the extended state.
12374 *
12375 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12376 * @param a1 The first extra argument.
12377 * @param a2 The second extra argument.
12378 * @param a3 The third extra argument.
12379 */
12380#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12381 do { \
12382 IEM_MC_PREPARE_AVX_USAGE(); \
12383 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12384 } while (0)
12385
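/*
 * Illustrative sketch, added for documentation purposes only (not part of the
 * original sources): the intended usage pattern for the AVX call macros above.
 * IEM_MC_BEGIN/END, IEM_MC_ARG_CONST and IEM_MC_ADVANCE_RIP are assumed from
 * elsewhere in this file, and iemAImpl_vExampleWorker is a hypothetical
 * assembly worker taking (PX86XSAVEAREA, uint8_t, uint8_t).
 */
#if 0 /* example only */
        IEM_MC_BEGIN(3, 0);
        IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();                       /* declares arg 0: pXState */
        IEM_MC_ARG_CONST(uint8_t, iYRegDst, /*a_Value=*/ 0, 1); /* hypothetical register indices */
        IEM_MC_ARG_CONST(uint8_t, iYRegSrc, /*a_Value=*/ 1, 2);
        IEM_MC_CALL_AVX_AIMPL_2(iemAImpl_vExampleWorker, iYRegDst, iYRegSrc);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
#endif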
12386/** @note Not for IOPL or IF testing. */
12387#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
12388/** @note Not for IOPL or IF testing. */
12389#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
12390/** @note Not for IOPL or IF testing. */
12391#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
12392/** @note Not for IOPL or IF testing. */
12393#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
12394/** @note Not for IOPL or IF testing. */
12395#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12396 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12397 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12398/** @note Not for IOPL or IF testing. */
12399#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12400 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12401 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12402/** @note Not for IOPL or IF testing. */
12403#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12404 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12405 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12406 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12407/** @note Not for IOPL or IF testing. */
12408#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12409 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12410 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12411 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12412#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
12413#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
12414#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
12415/** @note Not for IOPL or IF testing. */
12416#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12417 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12418 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12419/** @note Not for IOPL or IF testing. */
12420#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12421 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12422 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12423/** @note Not for IOPL or IF testing. */
12424#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12425 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12426 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12427/** @note Not for IOPL or IF testing. */
12428#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12429 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12430 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12431/** @note Not for IOPL or IF testing. */
12432#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12433 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12434 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12435/** @note Not for IOPL or IF testing. */
12436#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12437 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12438 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12439#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12440#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12441
12442#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12443 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12444#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12445 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12446#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12447 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12448#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12449 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12450#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12451 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12452#define IEM_MC_IF_FCW_IM() \
12453 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12454
12455#define IEM_MC_ELSE() } else {
12456#define IEM_MC_ENDIF() } do {} while (0)
12457
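/*
 * Illustrative sketch, added for documentation purposes only (not part of the
 * original sources): an "fld m32real" style MC block tying together the FPU
 * macros above (usage preparation, stack-register tests, AIMPL call, result
 * push and push-overflow handling).  IEM_MC_BEGIN/END, IEM_MC_LOCAL,
 * IEM_MC_ARG_LOCAL_REF, the IEM_MC_MAYBE_RAISE_* checks, IEM_MC_FETCH_MEM_R32,
 * IEM_MC_ADVANCE_RIP and the decode-completion helper are assumed from
 * elsewhere in this file.
 */
#if 0 /* example only */
        IEM_MC_BEGIN(2, 3);
        IEM_MC_LOCAL(RTGCPTR,               GCPtrEffSrc);
        IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
        IEM_MC_LOCAL(RTFLOAT32U,            r32Val);
        IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
        IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U,  pr32Val, r32Val, 1);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
        IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
        IEM_MC_MAYBE_RAISE_FPU_XCPT();
        IEM_MC_FETCH_MEM_R32(r32Val, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);

        IEM_MC_PREPARE_FPU_USAGE();
        IEM_MC_IF_FPUREG_IS_EMPTY(7)
            IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fld_r32_to_r80, pFpuRes, pr32Val);
            IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
        IEM_MC_ELSE()
            IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
        IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
#endif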
12458/** @} */
12459
12460
12461/** @name Opcode Debug Helpers.
12462 * @{
12463 */
12464#ifdef VBOX_WITH_STATISTICS
12465# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12466#else
12467# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12468#endif
12469
12470#ifdef DEBUG
12471# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12472 do { \
12473 IEMOP_INC_STATS(a_Stats); \
12474 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
12475 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12476 } while (0)
12477
12478# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12479 do { \
12480 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12481 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12482 (void)RT_CONCAT(OP_,a_Upper); \
12483 (void)(a_fDisHints); \
12484 (void)(a_fIemHints); \
12485 } while (0)
12486
12487# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12488 do { \
12489 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12490 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12491 (void)RT_CONCAT(OP_,a_Upper); \
12492 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12493 (void)(a_fDisHints); \
12494 (void)(a_fIemHints); \
12495 } while (0)
12496
12497# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12498 do { \
12499 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12500 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12501 (void)RT_CONCAT(OP_,a_Upper); \
12502 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12503 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12504 (void)(a_fDisHints); \
12505 (void)(a_fIemHints); \
12506 } while (0)
12507
12508# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12509 do { \
12510 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12511 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12512 (void)RT_CONCAT(OP_,a_Upper); \
12513 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12514 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12515 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12516 (void)(a_fDisHints); \
12517 (void)(a_fIemHints); \
12518 } while (0)
12519
12520# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12521 do { \
12522 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12523 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12524 (void)RT_CONCAT(OP_,a_Upper); \
12525 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12526 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12527 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12528 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12529 (void)(a_fDisHints); \
12530 (void)(a_fIemHints); \
12531 } while (0)
12532
12533#else
12534# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12535
12536# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12537 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12538# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12539 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12540# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12541 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12542# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12543 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12544# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12545 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12546
12547#endif
12548
12549#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12550 IEMOP_MNEMONIC0EX(a_Lower, \
12551 #a_Lower, \
12552 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12553#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12554 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12555 #a_Lower " " #a_Op1, \
12556 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12557#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12558 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12559 #a_Lower " " #a_Op1 "," #a_Op2, \
12560 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12561#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12562 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12563 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12564 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12565#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12566 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12567 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12568 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
12569
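/*
 * Illustrative sketch, added for documentation purposes only (not part of the
 * original sources): how an opcode decoder typically opens with one of the
 * IEMOP_MNEMONIC* macros.  FNIEMOP_DEF is assumed from elsewhere in this file;
 * the statistics member expands to "add_Gv_Ev" and, in debug builds, the Log4
 * line reads "add Gv,Ev".
 */
#if 0 /* example only */
FNIEMOP_DEF(iemOp_add_Gv_Ev)
{
    IEMOP_MNEMONIC2(RM, ADD, add, Gv, Ev, DISOPTYPE_HARMLESS, 0);
    /* ... operand decoding and the IEM_MC block would follow here ... */
    return VERR_IEM_INSTR_NOT_IMPLEMENTED; /* placeholder for this sketch */
}
#endif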
12570/** @} */
12571
12572
12573/** @name Opcode Helpers.
12574 * @{
12575 */
12576
12577#ifdef IN_RING3
12578# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12579 do { \
12580 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12581 else \
12582 { \
12583 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12584 return IEMOP_RAISE_INVALID_OPCODE(); \
12585 } \
12586 } while (0)
12587#else
12588# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12589 do { \
12590 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12591 else return IEMOP_RAISE_INVALID_OPCODE(); \
12592 } while (0)
12593#endif
12594
12595/** The instruction requires a 186 or later. */
12596#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12597# define IEMOP_HLP_MIN_186() do { } while (0)
12598#else
12599# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12600#endif
12601
12602/** The instruction requires a 286 or later. */
12603#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12604# define IEMOP_HLP_MIN_286() do { } while (0)
12605#else
12606# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12607#endif
12608
12609/** The instruction requires a 386 or later. */
12610#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12611# define IEMOP_HLP_MIN_386() do { } while (0)
12612#else
12613# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12614#endif
12615
12616/** The instruction requires a 386 or later if the given expression is true. */
12617#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12618# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12619#else
12620# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12621#endif
12622
12623/** The instruction requires a 486 or later. */
12624#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12625# define IEMOP_HLP_MIN_486() do { } while (0)
12626#else
12627# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12628#endif
12629
12630/** The instruction requires a Pentium (586) or later. */
12631#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12632# define IEMOP_HLP_MIN_586() do { } while (0)
12633#else
12634# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12635#endif
12636
12637/** The instruction requires a PentiumPro (686) or later. */
12638#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12639# define IEMOP_HLP_MIN_686() do { } while (0)
12640#else
12641# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12642#endif
12643
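/*
 * Illustrative sketch, added for documentation purposes only (not part of the
 * original sources): the target-CPU guards above are meant to sit right after
 * the mnemonic at the top of an opcode decoder.  FNIEMOP_DEF is assumed from
 * elsewhere in this file and iemOp_someNewOp is a hypothetical decoder.
 */
#if 0 /* example only */
FNIEMOP_DEF(iemOp_someNewOp)
{
    IEMOP_MNEMONIC(someNewOp, "somenewop");
    IEMOP_HLP_MIN_286();    /* raises an invalid-opcode exception on pre-286 target CPUs */
    /* ... the rest of the decoder would follow here ... */
    return VERR_IEM_INSTR_NOT_IMPLEMENTED; /* placeholder for this sketch */
}
#endif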
12644
12645/** The instruction raises an \#UD in real and V8086 mode. */
12646#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12647 do \
12648 { \
12649 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12650 else return IEMOP_RAISE_INVALID_OPCODE(); \
12651 } while (0)
12652
12653/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12654 * 64-bit mode. */
12655#define IEMOP_HLP_NO_64BIT() \
12656 do \
12657 { \
12658 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12659 return IEMOP_RAISE_INVALID_OPCODE(); \
12660 } while (0)
12661
12662/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12663 * 64-bit mode. */
12664#define IEMOP_HLP_ONLY_64BIT() \
12665 do \
12666 { \
12667 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12668 return IEMOP_RAISE_INVALID_OPCODE(); \
12669 } while (0)
12670
12671/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12672#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12673 do \
12674 { \
12675 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12676 iemRecalEffOpSize64Default(pVCpu); \
12677 } while (0)
12678
12679/** The instruction has 64-bit operand size if 64-bit mode. */
12680#define IEMOP_HLP_64BIT_OP_SIZE() \
12681 do \
12682 { \
12683 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12684 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12685 } while (0)
12686
12687/** Only a REX prefix immediately preceding the first opcode byte takes
12688 * effect. This macro helps ensure this as well as log bad guest code. */
12689#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12690 do \
12691 { \
12692 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12693 { \
12694 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
12695 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
12696 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12697 pVCpu->iem.s.uRexB = 0; \
12698 pVCpu->iem.s.uRexIndex = 0; \
12699 pVCpu->iem.s.uRexReg = 0; \
12700 iemRecalEffOpSize(pVCpu); \
12701 } \
12702 } while (0)
12703
12704/**
12705 * Done decoding.
12706 */
12707#define IEMOP_HLP_DONE_DECODING() \
12708 do \
12709 { \
12710 /*nothing for now, maybe later... */ \
12711 } while (0)
12712
12713/**
12714 * Done decoding, raise \#UD exception if lock prefix present.
12715 */
12716#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12717 do \
12718 { \
12719 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12720 { /* likely */ } \
12721 else \
12722 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12723 } while (0)
12724
12725
12726/**
12727 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12728 * repnz or size prefixes are present, or if in real or v8086 mode.
12729 */
12730#define IEMOP_HLP_DONE_VEX_DECODING() \
12731 do \
12732 { \
12733 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12734 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12735 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12736 { /* likely */ } \
12737 else \
12738 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12739 } while (0)
12740
12741/**
12742 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12743 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L != 0.
12744 */
12745#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12746 do \
12747 { \
12748 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12749 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12750 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12751 && pVCpu->iem.s.uVexLength == 0)) \
12752 { /* likely */ } \
12753 else \
12754 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12755 } while (0)
12756
12757
12758/**
12759 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12760 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12761 * register 0, or if in real or v8086 mode.
12762 */
12763#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12764 do \
12765 { \
12766 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12767 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12768 && !pVCpu->iem.s.uVex3rdReg \
12769 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12770 { /* likely */ } \
12771 else \
12772 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12773 } while (0)
12774
12775/**
12776 * Done decoding VEX, no V, L=0.
12777 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12778 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12779 */
12780#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12781 do \
12782 { \
12783 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12784 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12785 && pVCpu->iem.s.uVexLength == 0 \
12786 && pVCpu->iem.s.uVex3rdReg == 0 \
12787 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12788 { /* likely */ } \
12789 else \
12790 return IEMOP_RAISE_INVALID_OPCODE(); \
12791 } while (0)
12792
12793#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12794 do \
12795 { \
12796 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12797 { /* likely */ } \
12798 else \
12799 { \
12800 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12801 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12802 } \
12803 } while (0)
12804#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12805 do \
12806 { \
12807 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12808 { /* likely */ } \
12809 else \
12810 { \
12811 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12812 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12813 } \
12814 } while (0)
12815
12816/**
12817 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12818 * are present.
12819 */
12820#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12821 do \
12822 { \
12823 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12824 { /* likely */ } \
12825 else \
12826 return IEMOP_RAISE_INVALID_OPCODE(); \
12827 } while (0)
12828
12829
12830#ifdef VBOX_WITH_NESTED_HWVIRT
12831/** Checks and handles the SVM nested-guest control & instruction intercept. */
12832# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
12833 do \
12834 { \
12835 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
12836 IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
12837 } while (0)
12838
12839/** Checks and handles the SVM nested-guest CR read intercept for the given CR. */
12840# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \
12841 do \
12842 { \
12843 if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)) \
12844 IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, SVM_EXIT_READ_CR0 + (a_uCr), a_uExitInfo1, a_uExitInfo2); \
12845 } while (0)
12846
12847#else /* !VBOX_WITH_NESTED_HWVIRT */
12848# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12849# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12850#endif /* !VBOX_WITH_NESTED_HWVIRT */
12851
12852
12853/**
12854 * Calculates the effective address of a ModR/M memory operand.
12855 *
12856 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12857 *
12858 * @return Strict VBox status code.
12859 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12860 * @param bRm The ModRM byte.
12861 * @param cbImm The size of any immediate following the
12862 * effective address opcode bytes. Important for
12863 * RIP relative addressing.
12864 * @param pGCPtrEff Where to return the effective address.
12865 */
12866IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12867{
12868 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12869 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12870# define SET_SS_DEF() \
12871 do \
12872 { \
12873 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12874 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12875 } while (0)
12876
12877 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12878 {
12879/** @todo Check the effective address size crap! */
12880 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12881 {
12882 uint16_t u16EffAddr;
12883
12884 /* Handle the disp16 form with no registers first. */
12885 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12886 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12887 else
12888 {
12889 /* Get the displacement. */
12890 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12891 {
12892 case 0: u16EffAddr = 0; break;
12893 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12894 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12895 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12896 }
12897
12898 /* Add the base and index registers to the disp. */
12899 switch (bRm & X86_MODRM_RM_MASK)
12900 {
12901 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12902 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12903 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12904 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12905 case 4: u16EffAddr += pCtx->si; break;
12906 case 5: u16EffAddr += pCtx->di; break;
12907 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12908 case 7: u16EffAddr += pCtx->bx; break;
12909 }
12910 }
12911
12912 *pGCPtrEff = u16EffAddr;
12913 }
12914 else
12915 {
12916 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12917 uint32_t u32EffAddr;
12918
12919 /* Handle the disp32 form with no registers first. */
12920 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12921 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12922 else
12923 {
12924 /* Get the register (or SIB) value. */
12925 switch ((bRm & X86_MODRM_RM_MASK))
12926 {
12927 case 0: u32EffAddr = pCtx->eax; break;
12928 case 1: u32EffAddr = pCtx->ecx; break;
12929 case 2: u32EffAddr = pCtx->edx; break;
12930 case 3: u32EffAddr = pCtx->ebx; break;
12931 case 4: /* SIB */
12932 {
12933 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12934
12935 /* Get the index and scale it. */
12936 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12937 {
12938 case 0: u32EffAddr = pCtx->eax; break;
12939 case 1: u32EffAddr = pCtx->ecx; break;
12940 case 2: u32EffAddr = pCtx->edx; break;
12941 case 3: u32EffAddr = pCtx->ebx; break;
12942 case 4: u32EffAddr = 0; /*none */ break;
12943 case 5: u32EffAddr = pCtx->ebp; break;
12944 case 6: u32EffAddr = pCtx->esi; break;
12945 case 7: u32EffAddr = pCtx->edi; break;
12946 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12947 }
12948 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12949
12950 /* add base */
12951 switch (bSib & X86_SIB_BASE_MASK)
12952 {
12953 case 0: u32EffAddr += pCtx->eax; break;
12954 case 1: u32EffAddr += pCtx->ecx; break;
12955 case 2: u32EffAddr += pCtx->edx; break;
12956 case 3: u32EffAddr += pCtx->ebx; break;
12957 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12958 case 5:
12959 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12960 {
12961 u32EffAddr += pCtx->ebp;
12962 SET_SS_DEF();
12963 }
12964 else
12965 {
12966 uint32_t u32Disp;
12967 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12968 u32EffAddr += u32Disp;
12969 }
12970 break;
12971 case 6: u32EffAddr += pCtx->esi; break;
12972 case 7: u32EffAddr += pCtx->edi; break;
12973 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12974 }
12975 break;
12976 }
12977 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12978 case 6: u32EffAddr = pCtx->esi; break;
12979 case 7: u32EffAddr = pCtx->edi; break;
12980 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12981 }
12982
12983 /* Get and add the displacement. */
12984 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12985 {
12986 case 0:
12987 break;
12988 case 1:
12989 {
12990 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12991 u32EffAddr += i8Disp;
12992 break;
12993 }
12994 case 2:
12995 {
12996 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12997 u32EffAddr += u32Disp;
12998 break;
12999 }
13000 default:
13001 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13002 }
13003
13004 }
13005 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13006 *pGCPtrEff = u32EffAddr;
13007 else
13008 {
13009 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13010 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13011 }
13012 }
13013 }
13014 else
13015 {
13016 uint64_t u64EffAddr;
13017
13018 /* Handle the rip+disp32 form with no registers first. */
13019 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13020 {
13021 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13022 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13023 }
13024 else
13025 {
13026 /* Get the register (or SIB) value. */
13027 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13028 {
13029 case 0: u64EffAddr = pCtx->rax; break;
13030 case 1: u64EffAddr = pCtx->rcx; break;
13031 case 2: u64EffAddr = pCtx->rdx; break;
13032 case 3: u64EffAddr = pCtx->rbx; break;
13033 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13034 case 6: u64EffAddr = pCtx->rsi; break;
13035 case 7: u64EffAddr = pCtx->rdi; break;
13036 case 8: u64EffAddr = pCtx->r8; break;
13037 case 9: u64EffAddr = pCtx->r9; break;
13038 case 10: u64EffAddr = pCtx->r10; break;
13039 case 11: u64EffAddr = pCtx->r11; break;
13040 case 13: u64EffAddr = pCtx->r13; break;
13041 case 14: u64EffAddr = pCtx->r14; break;
13042 case 15: u64EffAddr = pCtx->r15; break;
13043 /* SIB */
13044 case 4:
13045 case 12:
13046 {
13047 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13048
13049 /* Get the index and scale it. */
13050 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13051 {
13052 case 0: u64EffAddr = pCtx->rax; break;
13053 case 1: u64EffAddr = pCtx->rcx; break;
13054 case 2: u64EffAddr = pCtx->rdx; break;
13055 case 3: u64EffAddr = pCtx->rbx; break;
13056 case 4: u64EffAddr = 0; /*none */ break;
13057 case 5: u64EffAddr = pCtx->rbp; break;
13058 case 6: u64EffAddr = pCtx->rsi; break;
13059 case 7: u64EffAddr = pCtx->rdi; break;
13060 case 8: u64EffAddr = pCtx->r8; break;
13061 case 9: u64EffAddr = pCtx->r9; break;
13062 case 10: u64EffAddr = pCtx->r10; break;
13063 case 11: u64EffAddr = pCtx->r11; break;
13064 case 12: u64EffAddr = pCtx->r12; break;
13065 case 13: u64EffAddr = pCtx->r13; break;
13066 case 14: u64EffAddr = pCtx->r14; break;
13067 case 15: u64EffAddr = pCtx->r15; break;
13068 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13069 }
13070 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13071
13072 /* add base */
13073 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13074 {
13075 case 0: u64EffAddr += pCtx->rax; break;
13076 case 1: u64EffAddr += pCtx->rcx; break;
13077 case 2: u64EffAddr += pCtx->rdx; break;
13078 case 3: u64EffAddr += pCtx->rbx; break;
13079 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13080 case 6: u64EffAddr += pCtx->rsi; break;
13081 case 7: u64EffAddr += pCtx->rdi; break;
13082 case 8: u64EffAddr += pCtx->r8; break;
13083 case 9: u64EffAddr += pCtx->r9; break;
13084 case 10: u64EffAddr += pCtx->r10; break;
13085 case 11: u64EffAddr += pCtx->r11; break;
13086 case 12: u64EffAddr += pCtx->r12; break;
13087 case 14: u64EffAddr += pCtx->r14; break;
13088 case 15: u64EffAddr += pCtx->r15; break;
13089 /* complicated encodings */
13090 case 5:
13091 case 13:
13092 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13093 {
13094 if (!pVCpu->iem.s.uRexB)
13095 {
13096 u64EffAddr += pCtx->rbp;
13097 SET_SS_DEF();
13098 }
13099 else
13100 u64EffAddr += pCtx->r13;
13101 }
13102 else
13103 {
13104 uint32_t u32Disp;
13105 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13106 u64EffAddr += (int32_t)u32Disp;
13107 }
13108 break;
13109 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13110 }
13111 break;
13112 }
13113 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13114 }
13115
13116 /* Get and add the displacement. */
13117 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13118 {
13119 case 0:
13120 break;
13121 case 1:
13122 {
13123 int8_t i8Disp;
13124 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13125 u64EffAddr += i8Disp;
13126 break;
13127 }
13128 case 2:
13129 {
13130 uint32_t u32Disp;
13131 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13132 u64EffAddr += (int32_t)u32Disp;
13133 break;
13134 }
13135 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13136 }
13137
13138 }
13139
13140 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13141 *pGCPtrEff = u64EffAddr;
13142 else
13143 {
13144 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13145 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13146 }
13147 }
13148
13149 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13150 return VINF_SUCCESS;
13151}
13152
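/*
 * Worked example, added for illustration (not part of the original sources):
 * with 32-bit addressing, bRm=0x84 decodes as mod=2, reg=0, rm=4, so a SIB
 * byte and a disp32 follow.  Given bSib=0x88 (scale=2 -> x4, index=1/ECX,
 * base=0/EAX) and disp32=0x1000, the function above returns
 *     *pGCPtrEff = EAX + ECX * 4 + 0x1000
 * and leaves the default segment at DS, since neither EBP nor ESP is involved
 * (SET_SS_DEF() is never invoked for this encoding).
 */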
13153
13154/**
13155 * Calculates the effective address of a ModR/M memory operand.
13156 *
13157 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13158 *
13159 * @return Strict VBox status code.
13160 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13161 * @param bRm The ModRM byte.
13162 * @param cbImm The size of any immediate following the
13163 * effective address opcode bytes. Important for
13164 * RIP relative addressing.
13165 * @param pGCPtrEff Where to return the effective address.
13166 * @param offRsp RSP displacement.
13167 */
13168IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13169{
13170 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13171 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13172# define SET_SS_DEF() \
13173 do \
13174 { \
13175 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13176 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13177 } while (0)
13178
13179 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13180 {
13181/** @todo Check the effective address size crap! */
13182 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13183 {
13184 uint16_t u16EffAddr;
13185
13186 /* Handle the disp16 form with no registers first. */
13187 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13188 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13189 else
13190 {
13191 /* Get the displacement. */
13192 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13193 {
13194 case 0: u16EffAddr = 0; break;
13195 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13196 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13197 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13198 }
13199
13200 /* Add the base and index registers to the disp. */
13201 switch (bRm & X86_MODRM_RM_MASK)
13202 {
13203 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13204 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13205 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13206 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13207 case 4: u16EffAddr += pCtx->si; break;
13208 case 5: u16EffAddr += pCtx->di; break;
13209 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13210 case 7: u16EffAddr += pCtx->bx; break;
13211 }
13212 }
13213
13214 *pGCPtrEff = u16EffAddr;
13215 }
13216 else
13217 {
13218 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13219 uint32_t u32EffAddr;
13220
13221 /* Handle the disp32 form with no registers first. */
13222 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13223 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13224 else
13225 {
13226 /* Get the register (or SIB) value. */
13227 switch ((bRm & X86_MODRM_RM_MASK))
13228 {
13229 case 0: u32EffAddr = pCtx->eax; break;
13230 case 1: u32EffAddr = pCtx->ecx; break;
13231 case 2: u32EffAddr = pCtx->edx; break;
13232 case 3: u32EffAddr = pCtx->ebx; break;
13233 case 4: /* SIB */
13234 {
13235 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13236
13237 /* Get the index and scale it. */
13238 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13239 {
13240 case 0: u32EffAddr = pCtx->eax; break;
13241 case 1: u32EffAddr = pCtx->ecx; break;
13242 case 2: u32EffAddr = pCtx->edx; break;
13243 case 3: u32EffAddr = pCtx->ebx; break;
13244 case 4: u32EffAddr = 0; /*none */ break;
13245 case 5: u32EffAddr = pCtx->ebp; break;
13246 case 6: u32EffAddr = pCtx->esi; break;
13247 case 7: u32EffAddr = pCtx->edi; break;
13248 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13249 }
13250 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13251
13252 /* add base */
13253 switch (bSib & X86_SIB_BASE_MASK)
13254 {
13255 case 0: u32EffAddr += pCtx->eax; break;
13256 case 1: u32EffAddr += pCtx->ecx; break;
13257 case 2: u32EffAddr += pCtx->edx; break;
13258 case 3: u32EffAddr += pCtx->ebx; break;
13259 case 4:
13260 u32EffAddr += pCtx->esp + offRsp;
13261 SET_SS_DEF();
13262 break;
13263 case 5:
13264 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13265 {
13266 u32EffAddr += pCtx->ebp;
13267 SET_SS_DEF();
13268 }
13269 else
13270 {
13271 uint32_t u32Disp;
13272 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13273 u32EffAddr += u32Disp;
13274 }
13275 break;
13276 case 6: u32EffAddr += pCtx->esi; break;
13277 case 7: u32EffAddr += pCtx->edi; break;
13278 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13279 }
13280 break;
13281 }
13282 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13283 case 6: u32EffAddr = pCtx->esi; break;
13284 case 7: u32EffAddr = pCtx->edi; break;
13285 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13286 }
13287
13288 /* Get and add the displacement. */
13289 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13290 {
13291 case 0:
13292 break;
13293 case 1:
13294 {
13295 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13296 u32EffAddr += i8Disp;
13297 break;
13298 }
13299 case 2:
13300 {
13301 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13302 u32EffAddr += u32Disp;
13303 break;
13304 }
13305 default:
13306 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13307 }
13308
13309 }
13310 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13311 *pGCPtrEff = u32EffAddr;
13312 else
13313 {
13314 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13315 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13316 }
13317 }
13318 }
13319 else
13320 {
13321 uint64_t u64EffAddr;
13322
13323 /* Handle the rip+disp32 form with no registers first. */
13324 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13325 {
13326 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13327 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13328 }
13329 else
13330 {
13331 /* Get the register (or SIB) value. */
13332 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13333 {
13334 case 0: u64EffAddr = pCtx->rax; break;
13335 case 1: u64EffAddr = pCtx->rcx; break;
13336 case 2: u64EffAddr = pCtx->rdx; break;
13337 case 3: u64EffAddr = pCtx->rbx; break;
13338 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13339 case 6: u64EffAddr = pCtx->rsi; break;
13340 case 7: u64EffAddr = pCtx->rdi; break;
13341 case 8: u64EffAddr = pCtx->r8; break;
13342 case 9: u64EffAddr = pCtx->r9; break;
13343 case 10: u64EffAddr = pCtx->r10; break;
13344 case 11: u64EffAddr = pCtx->r11; break;
13345 case 13: u64EffAddr = pCtx->r13; break;
13346 case 14: u64EffAddr = pCtx->r14; break;
13347 case 15: u64EffAddr = pCtx->r15; break;
13348 /* SIB */
13349 case 4:
13350 case 12:
13351 {
13352 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13353
13354 /* Get the index and scale it. */
13355 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13356 {
13357 case 0: u64EffAddr = pCtx->rax; break;
13358 case 1: u64EffAddr = pCtx->rcx; break;
13359 case 2: u64EffAddr = pCtx->rdx; break;
13360 case 3: u64EffAddr = pCtx->rbx; break;
13361 case 4: u64EffAddr = 0; /*none */ break;
13362 case 5: u64EffAddr = pCtx->rbp; break;
13363 case 6: u64EffAddr = pCtx->rsi; break;
13364 case 7: u64EffAddr = pCtx->rdi; break;
13365 case 8: u64EffAddr = pCtx->r8; break;
13366 case 9: u64EffAddr = pCtx->r9; break;
13367 case 10: u64EffAddr = pCtx->r10; break;
13368 case 11: u64EffAddr = pCtx->r11; break;
13369 case 12: u64EffAddr = pCtx->r12; break;
13370 case 13: u64EffAddr = pCtx->r13; break;
13371 case 14: u64EffAddr = pCtx->r14; break;
13372 case 15: u64EffAddr = pCtx->r15; break;
13373 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13374 }
13375 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13376
13377 /* add base */
13378 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13379 {
13380 case 0: u64EffAddr += pCtx->rax; break;
13381 case 1: u64EffAddr += pCtx->rcx; break;
13382 case 2: u64EffAddr += pCtx->rdx; break;
13383 case 3: u64EffAddr += pCtx->rbx; break;
13384 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
13385 case 6: u64EffAddr += pCtx->rsi; break;
13386 case 7: u64EffAddr += pCtx->rdi; break;
13387 case 8: u64EffAddr += pCtx->r8; break;
13388 case 9: u64EffAddr += pCtx->r9; break;
13389 case 10: u64EffAddr += pCtx->r10; break;
13390 case 11: u64EffAddr += pCtx->r11; break;
13391 case 12: u64EffAddr += pCtx->r12; break;
13392 case 14: u64EffAddr += pCtx->r14; break;
13393 case 15: u64EffAddr += pCtx->r15; break;
13394 /* complicated encodings */
13395 case 5:
13396 case 13:
13397 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13398 {
13399 if (!pVCpu->iem.s.uRexB)
13400 {
13401 u64EffAddr += pCtx->rbp;
13402 SET_SS_DEF();
13403 }
13404 else
13405 u64EffAddr += pCtx->r13;
13406 }
13407 else
13408 {
13409 uint32_t u32Disp;
13410 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13411 u64EffAddr += (int32_t)u32Disp;
13412 }
13413 break;
13414 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13415 }
13416 break;
13417 }
13418 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13419 }
13420
13421 /* Get and add the displacement. */
13422 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13423 {
13424 case 0:
13425 break;
13426 case 1:
13427 {
13428 int8_t i8Disp;
13429 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13430 u64EffAddr += i8Disp;
13431 break;
13432 }
13433 case 2:
13434 {
13435 uint32_t u32Disp;
13436 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13437 u64EffAddr += (int32_t)u32Disp;
13438 break;
13439 }
13440 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13441 }
13442
13443 }
13444
13445 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13446 *pGCPtrEff = u64EffAddr;
13447 else
13448 {
13449 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13450 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13451 }
13452 }
13453
13454 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13455 return VINF_SUCCESS;
13456}
13457
13458
13459#ifdef IEM_WITH_SETJMP
13460/**
13461 * Calculates the effective address of a ModR/M memory operand.
13462 *
13463 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13464 *
13465 * May longjmp on internal error.
13466 *
13467 * @return The effective address.
13468 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13469 * @param bRm The ModRM byte.
13470 * @param cbImm The size of any immediate following the
13471 * effective address opcode bytes. Important for
13472 * RIP relative addressing.
13473 */
13474IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13475{
13476 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13477 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13478# define SET_SS_DEF() \
13479 do \
13480 { \
13481 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13482 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13483 } while (0)
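 /* Per the x86 addressing rules the default segment switches from DS to SS whenever the
    base register is (E/R)BP or (E/R)SP and no explicit segment prefix is present;
    SET_SS_DEF() is invoked at exactly those spots below. */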
13484
13485 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13486 {
13487/** @todo Check the effective address size crap! */
13488 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13489 {
13490 uint16_t u16EffAddr;
13491
13492 /* Handle the disp16 form with no registers first. */
13493 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13494 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13495 else
13496 {
13497 /* Get the displacement. */
13498 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13499 {
13500 case 0: u16EffAddr = 0; break;
13501 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13502 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13503 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13504 }
13505
13506 /* Add the base and index registers to the disp. */
13507 switch (bRm & X86_MODRM_RM_MASK)
13508 {
13509 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13510 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13511 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13512 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13513 case 4: u16EffAddr += pCtx->si; break;
13514 case 5: u16EffAddr += pCtx->di; break;
13515 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13516 case 7: u16EffAddr += pCtx->bx; break;
13517 }
13518 }
13519
13520 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13521 return u16EffAddr;
13522 }
13523
13524 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13525 uint32_t u32EffAddr;
13526
13527 /* Handle the disp32 form with no registers first. */
13528 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13529 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13530 else
13531 {
13532 /* Get the register (or SIB) value. */
13533 switch ((bRm & X86_MODRM_RM_MASK))
13534 {
13535 case 0: u32EffAddr = pCtx->eax; break;
13536 case 1: u32EffAddr = pCtx->ecx; break;
13537 case 2: u32EffAddr = pCtx->edx; break;
13538 case 3: u32EffAddr = pCtx->ebx; break;
13539 case 4: /* SIB */
13540 {
13541 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13542
13543 /* Get the index and scale it. */
13544 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13545 {
13546 case 0: u32EffAddr = pCtx->eax; break;
13547 case 1: u32EffAddr = pCtx->ecx; break;
13548 case 2: u32EffAddr = pCtx->edx; break;
13549 case 3: u32EffAddr = pCtx->ebx; break;
13550 case 4: u32EffAddr = 0; /*none */ break;
13551 case 5: u32EffAddr = pCtx->ebp; break;
13552 case 6: u32EffAddr = pCtx->esi; break;
13553 case 7: u32EffAddr = pCtx->edi; break;
13554 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13555 }
13556 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13557
13558 /* add base */
13559 switch (bSib & X86_SIB_BASE_MASK)
13560 {
13561 case 0: u32EffAddr += pCtx->eax; break;
13562 case 1: u32EffAddr += pCtx->ecx; break;
13563 case 2: u32EffAddr += pCtx->edx; break;
13564 case 3: u32EffAddr += pCtx->ebx; break;
13565 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
13566 case 5:
13567 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13568 {
13569 u32EffAddr += pCtx->ebp;
13570 SET_SS_DEF();
13571 }
13572 else
13573 {
13574 uint32_t u32Disp;
13575 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13576 u32EffAddr += u32Disp;
13577 }
13578 break;
13579 case 6: u32EffAddr += pCtx->esi; break;
13580 case 7: u32EffAddr += pCtx->edi; break;
13581 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13582 }
13583 break;
13584 }
13585 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13586 case 6: u32EffAddr = pCtx->esi; break;
13587 case 7: u32EffAddr = pCtx->edi; break;
13588 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13589 }
13590
13591 /* Get and add the displacement. */
13592 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13593 {
13594 case 0:
13595 break;
13596 case 1:
13597 {
13598 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13599 u32EffAddr += i8Disp;
13600 break;
13601 }
13602 case 2:
13603 {
13604 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13605 u32EffAddr += u32Disp;
13606 break;
13607 }
13608 default:
13609 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13610 }
13611 }
13612
13613 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13614 {
13615 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13616 return u32EffAddr;
13617 }
13618 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13619 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13620 return u32EffAddr & UINT16_MAX;
13621 }
13622
13623 uint64_t u64EffAddr;
13624
13625 /* Handle the rip+disp32 form with no registers first. */
13626 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13627 {
13628 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13629 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13630 }
13631 else
13632 {
13633 /* Get the register (or SIB) value. */
13634 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13635 {
13636 case 0: u64EffAddr = pCtx->rax; break;
13637 case 1: u64EffAddr = pCtx->rcx; break;
13638 case 2: u64EffAddr = pCtx->rdx; break;
13639 case 3: u64EffAddr = pCtx->rbx; break;
13640 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13641 case 6: u64EffAddr = pCtx->rsi; break;
13642 case 7: u64EffAddr = pCtx->rdi; break;
13643 case 8: u64EffAddr = pCtx->r8; break;
13644 case 9: u64EffAddr = pCtx->r9; break;
13645 case 10: u64EffAddr = pCtx->r10; break;
13646 case 11: u64EffAddr = pCtx->r11; break;
13647 case 13: u64EffAddr = pCtx->r13; break;
13648 case 14: u64EffAddr = pCtx->r14; break;
13649 case 15: u64EffAddr = pCtx->r15; break;
13650 /* SIB */
13651 case 4:
13652 case 12:
13653 {
13654 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13655
13656 /* Get the index and scale it. */
13657 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13658 {
13659 case 0: u64EffAddr = pCtx->rax; break;
13660 case 1: u64EffAddr = pCtx->rcx; break;
13661 case 2: u64EffAddr = pCtx->rdx; break;
13662 case 3: u64EffAddr = pCtx->rbx; break;
13663 case 4: u64EffAddr = 0; /*none */ break;
13664 case 5: u64EffAddr = pCtx->rbp; break;
13665 case 6: u64EffAddr = pCtx->rsi; break;
13666 case 7: u64EffAddr = pCtx->rdi; break;
13667 case 8: u64EffAddr = pCtx->r8; break;
13668 case 9: u64EffAddr = pCtx->r9; break;
13669 case 10: u64EffAddr = pCtx->r10; break;
13670 case 11: u64EffAddr = pCtx->r11; break;
13671 case 12: u64EffAddr = pCtx->r12; break;
13672 case 13: u64EffAddr = pCtx->r13; break;
13673 case 14: u64EffAddr = pCtx->r14; break;
13674 case 15: u64EffAddr = pCtx->r15; break;
13675 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13676 }
13677 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13678
13679 /* add base */
13680 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13681 {
13682 case 0: u64EffAddr += pCtx->rax; break;
13683 case 1: u64EffAddr += pCtx->rcx; break;
13684 case 2: u64EffAddr += pCtx->rdx; break;
13685 case 3: u64EffAddr += pCtx->rbx; break;
13686 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13687 case 6: u64EffAddr += pCtx->rsi; break;
13688 case 7: u64EffAddr += pCtx->rdi; break;
13689 case 8: u64EffAddr += pCtx->r8; break;
13690 case 9: u64EffAddr += pCtx->r9; break;
13691 case 10: u64EffAddr += pCtx->r10; break;
13692 case 11: u64EffAddr += pCtx->r11; break;
13693 case 12: u64EffAddr += pCtx->r12; break;
13694 case 14: u64EffAddr += pCtx->r14; break;
13695 case 15: u64EffAddr += pCtx->r15; break;
13696 /* complicated encodings */
13697 case 5:
13698 case 13:
13699 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13700 {
13701 if (!pVCpu->iem.s.uRexB)
13702 {
13703 u64EffAddr += pCtx->rbp;
13704 SET_SS_DEF();
13705 }
13706 else
13707 u64EffAddr += pCtx->r13;
13708 }
13709 else
13710 {
13711 uint32_t u32Disp;
13712 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13713 u64EffAddr += (int32_t)u32Disp;
13714 }
13715 break;
13716 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13717 }
13718 break;
13719 }
13720 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13721 }
13722
13723 /* Get and add the displacement. */
13724 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13725 {
13726 case 0:
13727 break;
13728 case 1:
13729 {
13730 int8_t i8Disp;
13731 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13732 u64EffAddr += i8Disp;
13733 break;
13734 }
13735 case 2:
13736 {
13737 uint32_t u32Disp;
13738 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13739 u64EffAddr += (int32_t)u32Disp;
13740 break;
13741 }
13742 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13743 }
13744
13745 }
13746
13747 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13748 {
13749 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13750 return u64EffAddr;
13751 }
13752 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13753 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13754 return u64EffAddr & UINT32_MAX;
13755}
13756#endif /* IEM_WITH_SETJMP */
13757
13758
13759/** @} */
13760
13761
13762
13763/*
13764 * Include the instructions
13765 */
13766#include "IEMAllInstructions.cpp.h"
13767
13768
13769
13770
13771#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13772
13773/**
13774 * Sets up execution verification mode.
13775 */
13776IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
13777{
13779 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
13780
13781 /*
13782 * Always note down the address of the current instruction.
13783 */
13784 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
13785 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
13786
13787 /*
13788 * Enable verification and/or logging.
13789 */
13790 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
13791 if ( fNewNoRem
13792 && ( 0
13793#if 0 /* auto enable on first paged protected mode interrupt */
13794 || ( pOrgCtx->eflags.Bits.u1IF
13795 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
13796 && TRPMHasTrap(pVCpu)
13797 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
13798#endif
13799#if 0
13800 || ( pOrgCtx->cs.Sel == 0x10
13801 && ( pOrgCtx->rip == 0x90119e3e
13802 || pOrgCtx->rip == 0x901d9810))
13803#endif
13804#if 0 /* Auto enable DSL - FPU stuff. */
13805 || ( pOrgCtx->cs.Sel == 0x10
13806 && (// pOrgCtx->rip == 0xc02ec07f
13807 //|| pOrgCtx->rip == 0xc02ec082
13808 //|| pOrgCtx->rip == 0xc02ec0c9
13809 0
13810 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
13811#endif
13812#if 0 /* Auto enable DSL - fstp st0 stuff. */
13813 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
13814#endif
13815#if 0
13816 || pOrgCtx->rip == 0x9022bb3a
13817#endif
13818#if 0
13819 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
13820#endif
13821#if 0
13822 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
13823 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
13824#endif
13825#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
13826 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
13827 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
13828 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
13829#endif
13830#if 0 /* NT4SP1 - xadd early boot. */
13831 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
13832#endif
13833#if 0 /* NT4SP1 - wrmsr (intel MSR). */
13834 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
13835#endif
13836#if 0 /* NT4SP1 - cmpxchg (AMD). */
13837 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
13838#endif
13839#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
13840 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
13841#endif
13842#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
13843 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
13844
13845#endif
13846#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
13847 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
13848
13849#endif
13850#if 0 /* NT4SP1 - frstor [ecx] */
13851 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
13852#endif
13853#if 0 /* xxxxxx - All long mode code. */
13854 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
13855#endif
13856#if 0 /* rep movsq linux 3.7 64-bit boot. */
13857 || (pOrgCtx->rip == 0x0000000000100241)
13858#endif
13859#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
13860 || (pOrgCtx->rip == 0x000000000215e240)
13861#endif
13862#if 0 /* DOS's size-overridden iret to v8086. */
13863 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
13864#endif
13865 )
13866 )
13867 {
13868 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
13869 RTLogFlags(NULL, "enabled");
13870 fNewNoRem = false;
13871 }
13872 if (fNewNoRem != pVCpu->iem.s.fNoRem)
13873 {
13874 pVCpu->iem.s.fNoRem = fNewNoRem;
13875 if (!fNewNoRem)
13876 {
13877 LogAlways(("Enabling verification mode!\n"));
13878 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
13879 }
13880 else
13881 LogAlways(("Disabling verification mode!\n"));
13882 }
13883
13884 /*
13885 * Switch state.
13886 */
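 /* When verification is enabled, IEM executes on a private copy of the guest context so that
    the original can later be run by REM/HM and the two results compared in
    iemExecVerificationModeCheck. */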
13887 if (IEM_VERIFICATION_ENABLED(pVCpu))
13888 {
13889 static CPUMCTX s_DebugCtx; /* Ugly! */
13890
13891 s_DebugCtx = *pOrgCtx;
13892 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
13893 }
13894
13895 /*
13896 * See if there is an interrupt pending in TRPM and inject it if we can.
13897 */
13898 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13899 if ( pOrgCtx->eflags.Bits.u1IF
13900 && TRPMHasTrap(pVCpu)
13901 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
13902 {
13903 uint8_t u8TrapNo;
13904 TRPMEVENT enmType;
13905 RTGCUINT uErrCode;
13906 RTGCPTR uCr2;
13907 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13908 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13909 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13910 TRPMResetTrap(pVCpu);
13911 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
13912 }
13913
13914 /*
13915 * Reset the counters.
13916 */
13917 pVCpu->iem.s.cIOReads = 0;
13918 pVCpu->iem.s.cIOWrites = 0;
13919 pVCpu->iem.s.fIgnoreRaxRdx = false;
13920 pVCpu->iem.s.fOverlappingMovs = false;
13921 pVCpu->iem.s.fProblematicMemory = false;
13922 pVCpu->iem.s.fUndefinedEFlags = 0;
13923
13924 if (IEM_VERIFICATION_ENABLED(pVCpu))
13925 {
13926 /*
13927 * Free all verification records.
13928 */
13929 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
13930 pVCpu->iem.s.pIemEvtRecHead = NULL;
13931 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
13932 do
13933 {
13934 while (pEvtRec)
13935 {
13936 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
13937 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
13938 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
13939 pEvtRec = pNext;
13940 }
13941 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
13942 pVCpu->iem.s.pOtherEvtRecHead = NULL;
13943 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
13944 } while (pEvtRec);
13945 }
13946}
13947
13948
13949/**
13950 * Allocate an event record.
13951 * @returns Pointer to a record, or NULL if verification is disabled or no record can be allocated.
13952 */
13953IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
13954{
13955 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13956 return NULL;
13957
13958 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
13959 if (pEvtRec)
13960 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
13961 else
13962 {
13963 if (!pVCpu->iem.s.ppIemEvtRecNext)
13964 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
13965
13966 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
13967 if (!pEvtRec)
13968 return NULL;
13969 }
13970 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
13971 pEvtRec->pNext = NULL;
13972 return pEvtRec;
13973}
13974
13975
13976/**
13977 * IOMMMIORead notification.
13978 */
13979VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
13980{
13981 PVMCPU pVCpu = VMMGetCpu(pVM);
13982 if (!pVCpu)
13983 return;
13984 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13985 if (!pEvtRec)
13986 return;
13987 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
13988 pEvtRec->u.RamRead.GCPhys = GCPhys;
13989 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
13990 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13991 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13992}
13993
13994
13995/**
13996 * IOMMMIOWrite notification.
13997 */
13998VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
13999{
14000 PVMCPU pVCpu = VMMGetCpu(pVM);
14001 if (!pVCpu)
14002 return;
14003 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14004 if (!pEvtRec)
14005 return;
14006 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
14007 pEvtRec->u.RamWrite.GCPhys = GCPhys;
14008 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
14009 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
14010 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
14011 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
14012 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
14013 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14014 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14015}
14016
14017
14018/**
14019 * IOMIOPortRead notification.
14020 */
14021VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
14022{
14023 PVMCPU pVCpu = VMMGetCpu(pVM);
14024 if (!pVCpu)
14025 return;
14026 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14027 if (!pEvtRec)
14028 return;
14029 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
14030 pEvtRec->u.IOPortRead.Port = Port;
14031 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
14032 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14033 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14034}
14035
14036/**
14037 * IOMIOPortWrite notification.
14038 */
14039VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14040{
14041 PVMCPU pVCpu = VMMGetCpu(pVM);
14042 if (!pVCpu)
14043 return;
14044 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14045 if (!pEvtRec)
14046 return;
14047 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
14048 pEvtRec->u.IOPortWrite.Port = Port;
14049 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
14050 pEvtRec->u.IOPortWrite.u32Value = u32Value;
14051 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14052 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14053}
14054
14055
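/**
 * String I/O port read notification.
 */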
14056VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
14057{
14058 PVMCPU pVCpu = VMMGetCpu(pVM);
14059 if (!pVCpu)
14060 return;
14061 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14062 if (!pEvtRec)
14063 return;
14064 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
14065 pEvtRec->u.IOPortStrRead.Port = Port;
14066 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
14067 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
14068 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14069 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14070}
14071
14072
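/**
 * String I/O port write notification.
 */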
14073VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
14074{
14075 PVMCPU pVCpu = VMMGetCpu(pVM);
14076 if (!pVCpu)
14077 return;
14078 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14079 if (!pEvtRec)
14080 return;
14081 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
14082 pEvtRec->u.IOPortStrWrite.Port = Port;
14083 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
14084 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
14085 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14086 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14087}
14088
14089
14090/**
14091 * Fakes and records an I/O port read.
14092 *
14093 * @returns VINF_SUCCESS.
14094 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14095 * @param Port The I/O port.
14096 * @param pu32Value Where to store the fake value.
14097 * @param cbValue The size of the access.
14098 */
14099IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14100{
14101 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14102 if (pEvtRec)
14103 {
14104 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
14105 pEvtRec->u.IOPortRead.Port = Port;
14106 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
14107 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
14108 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
14109 }
14110 pVCpu->iem.s.cIOReads++;
14111 *pu32Value = 0xcccccccc;
14112 return VINF_SUCCESS;
14113}
14114
14115
14116/**
14117 * Fakes and records an I/O port write.
14118 *
14119 * @returns VINF_SUCCESS.
14120 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14121 * @param Port The I/O port.
14122 * @param u32Value The value being written.
14123 * @param cbValue The size of the access.
14124 */
14125IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14126{
14127 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14128 if (pEvtRec)
14129 {
14130 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
14131 pEvtRec->u.IOPortWrite.Port = Port;
14132 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
14133 pEvtRec->u.IOPortWrite.u32Value = u32Value;
14134 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
14135 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
14136 }
14137 pVCpu->iem.s.cIOWrites++;
14138 return VINF_SUCCESS;
14139}
14140
14141
14142/**
14143 * Used to add extra details about a stub case.
14144 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14145 */
14146IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
14147{
14148 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14149 PVM pVM = pVCpu->CTX_SUFF(pVM);
14151 char szRegs[4096];
14152 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
14153 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
14154 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
14155 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
14156 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
14157 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
14158 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
14159 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
14160 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
14161 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
14162 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
14163 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
14164 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
14165 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
14166 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
14167 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
14168 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
14169 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
14170 " efer=%016VR{efer}\n"
14171 " pat=%016VR{pat}\n"
14172 " sf_mask=%016VR{sf_mask}\n"
14173 "krnl_gs_base=%016VR{krnl_gs_base}\n"
14174 " lstar=%016VR{lstar}\n"
14175 " star=%016VR{star} cstar=%016VR{cstar}\n"
14176 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
14177 );
14178
14179 char szInstr1[256];
14180 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
14181 DBGF_DISAS_FLAGS_DEFAULT_MODE,
14182 szInstr1, sizeof(szInstr1), NULL);
14183 char szInstr2[256];
14184 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
14185 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14186 szInstr2, sizeof(szInstr2), NULL);
14187
14188 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
14189}
14190
14191
14192/**
14193 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
14194 * dump to the assertion info.
14195 *
14196 * @param pEvtRec The record to dump.
14197 */
14198IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
14199{
14200 switch (pEvtRec->enmEvent)
14201 {
14202 case IEMVERIFYEVENT_IOPORT_READ:
14203 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
14204 pEvtRec->u.IOPortRead.Port,
14205 pEvtRec->u.IOPortRead.cbValue);
14206 break;
14207 case IEMVERIFYEVENT_IOPORT_WRITE:
14208 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
14209 pEvtRec->u.IOPortWrite.Port,
14210 pEvtRec->u.IOPortWrite.cbValue,
14211 pEvtRec->u.IOPortWrite.u32Value);
14212 break;
14213 case IEMVERIFYEVENT_IOPORT_STR_READ:
14214 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
14215 pEvtRec->u.IOPortStrRead.Port,
14216 pEvtRec->u.IOPortStrRead.cbValue,
14217 pEvtRec->u.IOPortStrRead.cTransfers);
14218 break;
14219 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14220 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
14221 pEvtRec->u.IOPortStrWrite.Port,
14222 pEvtRec->u.IOPortStrWrite.cbValue,
14223 pEvtRec->u.IOPortStrWrite.cTransfers);
14224 break;
14225 case IEMVERIFYEVENT_RAM_READ:
14226 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
14227 pEvtRec->u.RamRead.GCPhys,
14228 pEvtRec->u.RamRead.cb);
14229 break;
14230 case IEMVERIFYEVENT_RAM_WRITE:
14231 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
14232 pEvtRec->u.RamWrite.GCPhys,
14233 pEvtRec->u.RamWrite.cb,
14234 (int)pEvtRec->u.RamWrite.cb,
14235 pEvtRec->u.RamWrite.ab);
14236 break;
14237 default:
14238 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
14239 break;
14240 }
14241}
14242
14243
14244/**
14245 * Raises an assertion on the specified records, showing the given message with
14246 * dumps of both records attached.
14247 *
14248 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14249 * @param pEvtRec1 The first record.
14250 * @param pEvtRec2 The second record.
14251 * @param pszMsg The message explaining why we're asserting.
14252 */
14253IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
14254{
14255 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14256 iemVerifyAssertAddRecordDump(pEvtRec1);
14257 iemVerifyAssertAddRecordDump(pEvtRec2);
14258 iemVerifyAssertMsg2(pVCpu);
14259 RTAssertPanic();
14260}
14261
14262
14263/**
14264 * Raises an assertion on the specified record, showing the given message with
14265 * a record dump attached.
14266 *
14267 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14268 * @param pEvtRec The record to dump.
14269 * @param pszMsg The message explaining why we're asserting.
14270 */
14271IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
14272{
14273 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14274 iemVerifyAssertAddRecordDump(pEvtRec);
14275 iemVerifyAssertMsg2(pVCpu);
14276 RTAssertPanic();
14277}
14278
14279
14280/**
14281 * Verifies a write record.
14282 *
14283 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14284 * @param pEvtRec The write record.
14285 * @param fRem Set if REM was doing the other execution. If clear
14286 * it was HM.
14287 */
14288IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
14289{
14290 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
14291 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
14292 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
14293 if ( RT_FAILURE(rc)
14294 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
14295 {
14296 /* fend off ins */
14297 if ( !pVCpu->iem.s.cIOReads
14298 || pEvtRec->u.RamWrite.ab[0] != 0xcc
14299 || ( pEvtRec->u.RamWrite.cb != 1
14300 && pEvtRec->u.RamWrite.cb != 2
14301 && pEvtRec->u.RamWrite.cb != 4) )
14302 {
14303 /* fend off ROMs and MMIO */
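 /* The two range checks below skip writes to the legacy VGA/BIOS area (roughly
    0xA0000-0x100000) and to the 256KB firmware mapping just below 4GB (0xFFFC0000 and up). */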
14304 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
14305 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
14306 {
14307 /* fend off fxsave */
14308 if (pEvtRec->u.RamWrite.cb != 512)
14309 {
14310 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
14311 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14312 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
14313 RTAssertMsg2Add("%s: %.*Rhxs\n"
14314 "iem: %.*Rhxs\n",
14315 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
14316 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
14317 iemVerifyAssertAddRecordDump(pEvtRec);
14318 iemVerifyAssertMsg2(pVCpu);
14319 RTAssertPanic();
14320 }
14321 }
14322 }
14323 }
14324
14325}
14326
14327/**
14328 * Performs the post-execution verification checks.
14329 */
14330IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
14331{
14332 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14333 return rcStrictIem;
14334
14335 /*
14336 * Switch back the state.
14337 */
14338 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
14339 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
14340 Assert(pOrgCtx != pDebugCtx);
14341 IEM_GET_CTX(pVCpu) = pOrgCtx;
14342
14343 /*
14344 * Execute the instruction in REM.
14345 */
14346 bool fRem = false;
14347 PVM pVM = pVCpu->CTX_SUFF(pVM);
14349 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
14350#ifdef IEM_VERIFICATION_MODE_FULL_HM
14351 if ( HMIsEnabled(pVM)
14352 && pVCpu->iem.s.cIOReads == 0
14353 && pVCpu->iem.s.cIOWrites == 0
14354 && !pVCpu->iem.s.fProblematicMemory)
14355 {
14356 uint64_t uStartRip = pOrgCtx->rip;
14357 unsigned iLoops = 0;
14358 do
14359 {
14360 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
14361 iLoops++;
14362 } while ( rc == VINF_SUCCESS
14363 || ( rc == VINF_EM_DBG_STEPPED
14364 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14365 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
14366 || ( pOrgCtx->rip != pDebugCtx->rip
14367 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
14368 && iLoops < 8) );
14369 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
14370 rc = VINF_SUCCESS;
14371 }
14372#endif
14373 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
14374 || rc == VINF_IOM_R3_IOPORT_READ
14375 || rc == VINF_IOM_R3_IOPORT_WRITE
14376 || rc == VINF_IOM_R3_MMIO_READ
14377 || rc == VINF_IOM_R3_MMIO_READ_WRITE
14378 || rc == VINF_IOM_R3_MMIO_WRITE
14379 || rc == VINF_CPUM_R3_MSR_READ
14380 || rc == VINF_CPUM_R3_MSR_WRITE
14381 || rc == VINF_EM_RESCHEDULE
14382 )
14383 {
14384 EMRemLock(pVM);
14385 rc = REMR3EmulateInstruction(pVM, pVCpu);
14386 AssertRC(rc);
14387 EMRemUnlock(pVM);
14388 fRem = true;
14389 }
14390
14391# if 1 /* Skip unimplemented instructions for now. */
14392 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14393 {
14394 IEM_GET_CTX(pVCpu) = pOrgCtx;
14395 if (rc == VINF_EM_DBG_STEPPED)
14396 return VINF_SUCCESS;
14397 return rc;
14398 }
14399# endif
14400
14401 /*
14402 * Compare the register states.
14403 */
14404 unsigned cDiffs = 0;
14405 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
14406 {
14407 //Log(("REM and IEM ends up with different registers!\n"));
14408 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
14409
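 /* The CHECK_* helpers compare one field of the IEM result (pDebugCtx/pDebugXState) against
    the REM/HM result (pOrgCtx/pOrgXState), logging both values and bumping cDiffs on any
    mismatch. */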
14410# define CHECK_FIELD(a_Field) \
14411 do \
14412 { \
14413 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14414 { \
14415 switch (sizeof(pOrgCtx->a_Field)) \
14416 { \
14417 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14418 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14419 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14420 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14421 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14422 } \
14423 cDiffs++; \
14424 } \
14425 } while (0)
14426# define CHECK_XSTATE_FIELD(a_Field) \
14427 do \
14428 { \
14429 if (pOrgXState->a_Field != pDebugXState->a_Field) \
14430 { \
14431 switch (sizeof(pOrgXState->a_Field)) \
14432 { \
14433 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14434 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14435 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14436 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14437 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14438 } \
14439 cDiffs++; \
14440 } \
14441 } while (0)
14442
14443# define CHECK_BIT_FIELD(a_Field) \
14444 do \
14445 { \
14446 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14447 { \
14448 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
14449 cDiffs++; \
14450 } \
14451 } while (0)
14452
14453# define CHECK_SEL(a_Sel) \
14454 do \
14455 { \
14456 CHECK_FIELD(a_Sel.Sel); \
14457 CHECK_FIELD(a_Sel.Attr.u); \
14458 CHECK_FIELD(a_Sel.u64Base); \
14459 CHECK_FIELD(a_Sel.u32Limit); \
14460 CHECK_FIELD(a_Sel.fFlags); \
14461 } while (0)
14462
14463 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
14464 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
14465
14466#if 1 /* The recompiler doesn't update these the intel way. */
14467 if (fRem)
14468 {
14469 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
14470 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
14471 pOrgXState->x87.CS = pDebugXState->x87.CS;
14472 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
14473 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
14474 pOrgXState->x87.DS = pDebugXState->x87.DS;
14475 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
14476 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
14477 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
14478 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
14479 }
14480#endif
14481 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
14482 {
14483 RTAssertMsg2Weak(" the FPU state differs\n");
14484 cDiffs++;
14485 CHECK_XSTATE_FIELD(x87.FCW);
14486 CHECK_XSTATE_FIELD(x87.FSW);
14487 CHECK_XSTATE_FIELD(x87.FTW);
14488 CHECK_XSTATE_FIELD(x87.FOP);
14489 CHECK_XSTATE_FIELD(x87.FPUIP);
14490 CHECK_XSTATE_FIELD(x87.CS);
14491 CHECK_XSTATE_FIELD(x87.Rsrvd1);
14492 CHECK_XSTATE_FIELD(x87.FPUDP);
14493 CHECK_XSTATE_FIELD(x87.DS);
14494 CHECK_XSTATE_FIELD(x87.Rsrvd2);
14495 CHECK_XSTATE_FIELD(x87.MXCSR);
14496 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
14497 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
14498 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
14499 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
14500 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
14501 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
14502 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
14503 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
14504 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
14505 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
14506 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
14507 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
14508 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
14509 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
14510 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
14511 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
14512 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
14513 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
14514 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
14515 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
14516 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
14517 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
14518 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
14519 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
14520 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
14521 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
14522 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
14523 }
14524 CHECK_FIELD(rip);
14525 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
14526 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
14527 {
14528 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
14529 CHECK_BIT_FIELD(rflags.Bits.u1CF);
14530 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
14531 CHECK_BIT_FIELD(rflags.Bits.u1PF);
14532 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
14533 CHECK_BIT_FIELD(rflags.Bits.u1AF);
14534 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
14535 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
14536 CHECK_BIT_FIELD(rflags.Bits.u1SF);
14537 CHECK_BIT_FIELD(rflags.Bits.u1TF);
14538 CHECK_BIT_FIELD(rflags.Bits.u1IF);
14539 CHECK_BIT_FIELD(rflags.Bits.u1DF);
14540 CHECK_BIT_FIELD(rflags.Bits.u1OF);
14541 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
14542 CHECK_BIT_FIELD(rflags.Bits.u1NT);
14543 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
14544 if (0 && !fRem) /** @todo debug the occasionally cleared RF flag when running against VT-x. */
14545 CHECK_BIT_FIELD(rflags.Bits.u1RF);
14546 CHECK_BIT_FIELD(rflags.Bits.u1VM);
14547 CHECK_BIT_FIELD(rflags.Bits.u1AC);
14548 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
14549 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
14550 CHECK_BIT_FIELD(rflags.Bits.u1ID);
14551 }
14552
14553 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
14554 CHECK_FIELD(rax);
14555 CHECK_FIELD(rcx);
14556 if (!pVCpu->iem.s.fIgnoreRaxRdx)
14557 CHECK_FIELD(rdx);
14558 CHECK_FIELD(rbx);
14559 CHECK_FIELD(rsp);
14560 CHECK_FIELD(rbp);
14561 CHECK_FIELD(rsi);
14562 CHECK_FIELD(rdi);
14563 CHECK_FIELD(r8);
14564 CHECK_FIELD(r9);
14565 CHECK_FIELD(r10);
14566 CHECK_FIELD(r11);
14567 CHECK_FIELD(r12);
14568 CHECK_FIELD(r13);
14569 CHECK_SEL(cs);
14570 CHECK_SEL(ss);
14571 CHECK_SEL(ds);
14572 CHECK_SEL(es);
14573 CHECK_SEL(fs);
14574 CHECK_SEL(gs);
14575 CHECK_FIELD(cr0);
14576
14577 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
14578 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
14579 /* Kludge #2: CR2 differs slightly on cross-page boundary faults; we report the last address of the access
14580 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
14581 if (pOrgCtx->cr2 != pDebugCtx->cr2)
14582 {
14583 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
14584 { /* ignore */ }
14585 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
14586 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
14587 && fRem)
14588 { /* ignore */ }
14589 else
14590 CHECK_FIELD(cr2);
14591 }
14592 CHECK_FIELD(cr3);
14593 CHECK_FIELD(cr4);
14594 CHECK_FIELD(dr[0]);
14595 CHECK_FIELD(dr[1]);
14596 CHECK_FIELD(dr[2]);
14597 CHECK_FIELD(dr[3]);
14598 CHECK_FIELD(dr[6]);
14599 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
14600 CHECK_FIELD(dr[7]);
14601 CHECK_FIELD(gdtr.cbGdt);
14602 CHECK_FIELD(gdtr.pGdt);
14603 CHECK_FIELD(idtr.cbIdt);
14604 CHECK_FIELD(idtr.pIdt);
14605 CHECK_SEL(ldtr);
14606 CHECK_SEL(tr);
14607 CHECK_FIELD(SysEnter.cs);
14608 CHECK_FIELD(SysEnter.eip);
14609 CHECK_FIELD(SysEnter.esp);
14610 CHECK_FIELD(msrEFER);
14611 CHECK_FIELD(msrSTAR);
14612 CHECK_FIELD(msrPAT);
14613 CHECK_FIELD(msrLSTAR);
14614 CHECK_FIELD(msrCSTAR);
14615 CHECK_FIELD(msrSFMASK);
14616 CHECK_FIELD(msrKERNELGSBASE);
14617
14618 if (cDiffs != 0)
14619 {
14620 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14621 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
14622 RTAssertPanic();
14623 static bool volatile s_fEnterDebugger = true;
14624 if (s_fEnterDebugger)
14625 DBGFSTOP(pVM);
14626
14627# if 1 /* Ignore unimplemented instructions for now. */
14628 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14629 rcStrictIem = VINF_SUCCESS;
14630# endif
14631 }
14632# undef CHECK_FIELD
14633# undef CHECK_BIT_FIELD
14634 }
14635
14636 /*
14637 * If the register state compared fine, check the verification event
14638 * records.
14639 */
14640 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
14641 {
14642 /*
14643 * Compare verification event records.
14644 * - I/O port accesses should be a 1:1 match.
14645 */
14646 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
14647 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
14648 while (pIemRec && pOtherRec)
14649 {
14650 /* Since we might miss RAM writes and reads, ignore reads and verify
14651 that any extra IEM write records match the actual guest memory. */
14652 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
14653 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
14654 && pIemRec->pNext)
14655 {
14656 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14657 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14658 pIemRec = pIemRec->pNext;
14659 }
14660
14661 /* Do the compare. */
14662 if (pIemRec->enmEvent != pOtherRec->enmEvent)
14663 {
14664 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
14665 break;
14666 }
14667 bool fEquals;
14668 switch (pIemRec->enmEvent)
14669 {
14670 case IEMVERIFYEVENT_IOPORT_READ:
14671 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
14672 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
14673 break;
14674 case IEMVERIFYEVENT_IOPORT_WRITE:
14675 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
14676 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
14677 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
14678 break;
14679 case IEMVERIFYEVENT_IOPORT_STR_READ:
14680 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
14681 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
14682 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
14683 break;
14684 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14685 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
14686 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
14687 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
14688 break;
14689 case IEMVERIFYEVENT_RAM_READ:
14690 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
14691 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
14692 break;
14693 case IEMVERIFYEVENT_RAM_WRITE:
14694 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
14695 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
14696 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
14697 break;
14698 default:
14699 fEquals = false;
14700 break;
14701 }
14702 if (!fEquals)
14703 {
14704 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
14705 break;
14706 }
14707
14708 /* advance */
14709 pIemRec = pIemRec->pNext;
14710 pOtherRec = pOtherRec->pNext;
14711 }
14712
14713 /* Ignore extra writes and reads. */
14714 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
14715 {
14716 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14717 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14718 pIemRec = pIemRec->pNext;
14719 }
14720 if (pIemRec != NULL)
14721 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
14722 else if (pOtherRec != NULL)
14723 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
14724 }
14725 IEM_GET_CTX(pVCpu) = pOrgCtx;
14726
14727 return rcStrictIem;
14728}
14729
14730#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14731
14732/* stubs */
14733IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14734{
14735 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
14736 return VERR_INTERNAL_ERROR;
14737}
14738
14739IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14740{
14741 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
14742 return VERR_INTERNAL_ERROR;
14743}
14744
14745#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14746
14747
14748#ifdef LOG_ENABLED
14749/**
14750 * Logs the current instruction.
14751 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14752 * @param pCtx The current CPU context.
14753 * @param fSameCtx Set if we have the same context information as the VMM,
14754 * clear if we may have already executed an instruction in
14755 * our debug context. When clear, we assume IEMCPU holds
14756 * valid CPU mode info.
14757 */
14758IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
14759{
14760# ifdef IN_RING3
14761 if (LogIs2Enabled())
14762 {
14763 char szInstr[256];
14764 uint32_t cbInstr = 0;
14765 if (fSameCtx)
14766 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
14767 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14768 szInstr, sizeof(szInstr), &cbInstr);
14769 else
14770 {
14771 uint32_t fFlags = 0;
14772 switch (pVCpu->iem.s.enmCpuMode)
14773 {
14774 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
14775 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
14776 case IEMMODE_16BIT:
14777 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
14778 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
14779 else
14780 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
14781 break;
14782 }
14783 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
14784 szInstr, sizeof(szInstr), &cbInstr);
14785 }
14786
14787 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
14788 Log2(("****\n"
14789 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
14790 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
14791 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
14792 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
14793 " %s\n"
14794 ,
14795 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
14796 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
14797 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
14798 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
14799 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
14800 szInstr));
14801
14802 if (LogIs3Enabled())
14803 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14804 }
14805 else
14806# endif
14807 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
14808 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
14809 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
14810}
14811#endif
14812
14813
14814/**
14815 * Makes status code adjustments (pass up from I/O and access handler)
14816 * as well as maintaining statistics.
14817 *
14818 * @returns Strict VBox status code to pass up.
14819 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14820 * @param rcStrict The status from executing an instruction.
14821 */
14822DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14823{
14824 if (rcStrict != VINF_SUCCESS)
14825 {
14826 if (RT_SUCCESS(rcStrict))
14827 {
14828 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
14829 || rcStrict == VINF_IOM_R3_IOPORT_READ
14830 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
14831 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
14832 || rcStrict == VINF_IOM_R3_MMIO_READ
14833 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
14834 || rcStrict == VINF_IOM_R3_MMIO_WRITE
14835 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
14836 || rcStrict == VINF_CPUM_R3_MSR_READ
14837 || rcStrict == VINF_CPUM_R3_MSR_WRITE
14838 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
14839 || rcStrict == VINF_EM_RAW_TO_R3
14840 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
14841 /* raw-mode / virt handlers only: */
14842 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
14843 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
14844 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
14845 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
14846 || rcStrict == VINF_SELM_SYNC_GDT
14847 || rcStrict == VINF_CSAM_PENDING_ACTION
14848 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
14849 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
14850/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
14851 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
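 /* A pending pass-up status replaces the current informational status when it lies outside
    the VINF_EM range or, within that range, has a lower (i.e. higher priority) value. */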
14852 if (rcPassUp == VINF_SUCCESS)
14853 pVCpu->iem.s.cRetInfStatuses++;
14854 else if ( rcPassUp < VINF_EM_FIRST
14855 || rcPassUp > VINF_EM_LAST
14856 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
14857 {
14858 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14859 pVCpu->iem.s.cRetPassUpStatus++;
14860 rcStrict = rcPassUp;
14861 }
14862 else
14863 {
14864 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14865 pVCpu->iem.s.cRetInfStatuses++;
14866 }
14867 }
14868 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
14869 pVCpu->iem.s.cRetAspectNotImplemented++;
14870 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14871 pVCpu->iem.s.cRetInstrNotImplemented++;
14872#ifdef IEM_VERIFICATION_MODE_FULL
14873 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
14874 rcStrict = VINF_SUCCESS;
14875#endif
14876 else
14877 pVCpu->iem.s.cRetErrStatuses++;
14878 }
14879 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
14880 {
14881 pVCpu->iem.s.cRetPassUpStatus++;
14882 rcStrict = pVCpu->iem.s.rcPassUp;
14883 }
14884
14885 return rcStrict;
14886}
14887
14888
14889/**
14890 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
14891 * IEMExecOneWithPrefetchedByPC.
14892 *
14893 * Similar code is found in IEMExecLots.
14894 *
14895 * @return Strict VBox status code.
14896 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14898 * @param fExecuteInhibit If set, execute the instruction following CLI,
14899 * POP SS and MOV SS,GR.
14900 */
14901DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
14902{
14903#ifdef IEM_WITH_SETJMP
14904 VBOXSTRICTRC rcStrict;
14905 jmp_buf JmpBuf;
14906 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14907 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14908 if ((rcStrict = setjmp(JmpBuf)) == 0)
14909 {
14910 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14911 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14912 }
14913 else
14914 pVCpu->iem.s.cLongJumps++;
14915 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14916#else
14917 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14918 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14919#endif
14920 if (rcStrict == VINF_SUCCESS)
14921 pVCpu->iem.s.cInstructions++;
14922 if (pVCpu->iem.s.cActiveMappings > 0)
14923 {
14924 Assert(rcStrict != VINF_SUCCESS);
14925 iemMemRollback(pVCpu);
14926 }
14927//#ifdef DEBUG
14928// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14929//#endif
14930
14931 /* Execute the next instruction as well if a cli, pop ss or
14932 mov ss, Gr has just completed successfully. */
14933 if ( fExecuteInhibit
14934 && rcStrict == VINF_SUCCESS
14935 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14936 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
14937 {
14938 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14939 if (rcStrict == VINF_SUCCESS)
14940 {
14941#ifdef LOG_ENABLED
14942 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
14943#endif
14944#ifdef IEM_WITH_SETJMP
14945 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14946 if ((rcStrict = setjmp(JmpBuf)) == 0)
14947 {
14948 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14949 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14950 }
14951 else
14952 pVCpu->iem.s.cLongJumps++;
14953 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14954#else
14955 IEM_OPCODE_GET_NEXT_U8(&b);
14956 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14957#endif
14958 if (rcStrict == VINF_SUCCESS)
14959 pVCpu->iem.s.cInstructions++;
14960 if (pVCpu->iem.s.cActiveMappings > 0)
14961 {
14962 Assert(rcStrict != VINF_SUCCESS);
14963 iemMemRollback(pVCpu);
14964 }
14965 }
14966 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14967 }
14968
14969 /*
14970 * Return value fiddling, statistics and sanity assertions.
14971 */
14972 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14973
14974 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14975 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14976#if defined(IEM_VERIFICATION_MODE_FULL)
14977 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14978 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14979 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14980 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14981#endif
14982 return rcStrict;
14983}
14984
14985
14986#ifdef IN_RC
14987/**
 14988 * Re-enters raw-mode or ensures we return to ring-3.
14989 *
14990 * @returns rcStrict, maybe modified.
14991 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14992 * @param pCtx The current CPU context.
 14993 * @param rcStrict The status code returned by the interpreter.
14994 */
14995DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
14996{
14997 if ( !pVCpu->iem.s.fInPatchCode
14998 && ( rcStrict == VINF_SUCCESS
14999 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
15000 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
15001 {
15002 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
15003 CPUMRawEnter(pVCpu);
15004 else
15005 {
15006 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
15007 rcStrict = VINF_EM_RESCHEDULE;
15008 }
15009 }
15010 return rcStrict;
15011}
15012#endif
15013
15014
15015/**
15016 * Execute one instruction.
15017 *
15018 * @return Strict VBox status code.
15019 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15020 */
15021VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
15022{
15023#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15024 if (++pVCpu->iem.s.cVerifyDepth == 1)
15025 iemExecVerificationModeSetup(pVCpu);
15026#endif
15027#ifdef LOG_ENABLED
15028 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15029 iemLogCurInstr(pVCpu, pCtx, true);
15030#endif
15031
15032 /*
15033 * Do the decoding and emulation.
15034 */
15035 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15036 if (rcStrict == VINF_SUCCESS)
15037 rcStrict = iemExecOneInner(pVCpu, true);
15038
15039#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15040 /*
15041 * Assert some sanity.
15042 */
15043 if (pVCpu->iem.s.cVerifyDepth == 1)
15044 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
15045 pVCpu->iem.s.cVerifyDepth--;
15046#endif
15047#ifdef IN_RC
15048 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15049#endif
15050 if (rcStrict != VINF_SUCCESS)
15051 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15052 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15053 return rcStrict;
15054}
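
/*
 * Usage sketch (illustrative only): a ring-3 caller could interpret a single
 * guest instruction like this and map the two "not implemented" statuses onto
 * a fallback of its own choosing.  The VINF_EM_RAW_EMULATE_INSTR fallback is
 * just one possible policy, not something this code prescribes.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *      if (   rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
 *          || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
 *          rcStrict = VINF_EM_RAW_EMULATE_INSTR;   // let another engine have a go
 *      return rcStrict;                            // the run loop acts on everything else
 */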
15055
15056
15057VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
15058{
15059 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15060 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15061
15062 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15063 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15064 if (rcStrict == VINF_SUCCESS)
15065 {
15066 rcStrict = iemExecOneInner(pVCpu, true);
15067 if (pcbWritten)
15068 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15069 }
15070
15071#ifdef IN_RC
15072 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15073#endif
15074 return rcStrict;
15075}
15076
15077
15078VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15079 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
15080{
15081 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15082 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15083
15084 VBOXSTRICTRC rcStrict;
15085 if ( cbOpcodeBytes
15086 && pCtx->rip == OpcodeBytesPC)
15087 {
15088 iemInitDecoder(pVCpu, false);
15089#ifdef IEM_WITH_CODE_TLB
15090 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15091 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15092 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15093 pVCpu->iem.s.offCurInstrStart = 0;
15094 pVCpu->iem.s.offInstrNextByte = 0;
15095#else
15096 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15097 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15098#endif
15099 rcStrict = VINF_SUCCESS;
15100 }
15101 else
15102 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15103 if (rcStrict == VINF_SUCCESS)
15104 {
15105 rcStrict = iemExecOneInner(pVCpu, true);
15106 }
15107
15108#ifdef IN_RC
15109 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15110#endif
15111 return rcStrict;
15112}
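
/*
 * Usage sketch (illustrative only): a caller that already holds the opcode
 * bytes for the instruction at pCtx->rip (say, from an exit info block) can
 * pass them in and skip the guest memory fetch.  pCtx, abInstr and cbFetched
 * are assumptions supplied by that caller.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx),
 *                                                           pCtx->rip, abInstr, cbFetched);
 */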
15113
15114
15115VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
15116{
15117 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15118 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15119
15120 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15121 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15122 if (rcStrict == VINF_SUCCESS)
15123 {
15124 rcStrict = iemExecOneInner(pVCpu, false);
15125 if (pcbWritten)
15126 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15127 }
15128
15129#ifdef IN_RC
15130 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15131#endif
15132 return rcStrict;
15133}
15134
15135
15136VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15137 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
15138{
15139 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15140 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15141
15142 VBOXSTRICTRC rcStrict;
15143 if ( cbOpcodeBytes
15144 && pCtx->rip == OpcodeBytesPC)
15145 {
15146 iemInitDecoder(pVCpu, true);
15147#ifdef IEM_WITH_CODE_TLB
15148 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15149 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15150 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15151 pVCpu->iem.s.offCurInstrStart = 0;
15152 pVCpu->iem.s.offInstrNextByte = 0;
15153#else
15154 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15155 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15156#endif
15157 rcStrict = VINF_SUCCESS;
15158 }
15159 else
15160 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15161 if (rcStrict == VINF_SUCCESS)
15162 rcStrict = iemExecOneInner(pVCpu, false);
15163
15164#ifdef IN_RC
15165 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15166#endif
15167 return rcStrict;
15168}
15169
15170
15171/**
 15172 * For debugging DISGetParamSize; may come in handy.
15173 *
15174 * @returns Strict VBox status code.
15175 * @param pVCpu The cross context virtual CPU structure of the
15176 * calling EMT.
15177 * @param pCtxCore The context core structure.
15178 * @param OpcodeBytesPC The PC of the opcode bytes.
 15179 * @param pvOpcodeBytes Prefetched opcode bytes.
15180 * @param cbOpcodeBytes Number of prefetched bytes.
15181 * @param pcbWritten Where to return the number of bytes written.
15182 * Optional.
15183 */
15184VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15185 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
15186 uint32_t *pcbWritten)
15187{
15188 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15189 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15190
15191 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15192 VBOXSTRICTRC rcStrict;
15193 if ( cbOpcodeBytes
15194 && pCtx->rip == OpcodeBytesPC)
15195 {
15196 iemInitDecoder(pVCpu, true);
15197#ifdef IEM_WITH_CODE_TLB
15198 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15199 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15200 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15201 pVCpu->iem.s.offCurInstrStart = 0;
15202 pVCpu->iem.s.offInstrNextByte = 0;
15203#else
15204 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15205 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15206#endif
15207 rcStrict = VINF_SUCCESS;
15208 }
15209 else
15210 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15211 if (rcStrict == VINF_SUCCESS)
15212 {
15213 rcStrict = iemExecOneInner(pVCpu, false);
15214 if (pcbWritten)
15215 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15216 }
15217
15218#ifdef IN_RC
15219 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15220#endif
15221 return rcStrict;
15222}
15223
15224
15225VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
15226{
15227 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
15228
15229#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15230 /*
15231 * See if there is an interrupt pending in TRPM, inject it if we can.
15232 */
15233 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15234# ifdef IEM_VERIFICATION_MODE_FULL
15235 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15236# endif
15237 if ( pCtx->eflags.Bits.u1IF
15238 && TRPMHasTrap(pVCpu)
15239 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15240 {
15241 uint8_t u8TrapNo;
15242 TRPMEVENT enmType;
15243 RTGCUINT uErrCode;
15244 RTGCPTR uCr2;
15245 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15246 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15247 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15248 TRPMResetTrap(pVCpu);
15249 }
15250
15251 /*
15252 * Log the state.
15253 */
15254# ifdef LOG_ENABLED
15255 iemLogCurInstr(pVCpu, pCtx, true);
15256# endif
15257
15258 /*
15259 * Do the decoding and emulation.
15260 */
15261 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15262 if (rcStrict == VINF_SUCCESS)
15263 rcStrict = iemExecOneInner(pVCpu, true);
15264
15265 /*
15266 * Assert some sanity.
15267 */
15268 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
15269
15270 /*
15271 * Log and return.
15272 */
15273 if (rcStrict != VINF_SUCCESS)
15274 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15275 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15276 if (pcInstructions)
15277 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15278 return rcStrict;
15279
15280#else /* Not verification mode */
15281
15282 /*
15283 * See if there is an interrupt pending in TRPM, inject it if we can.
15284 */
15285 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15286# ifdef IEM_VERIFICATION_MODE_FULL
15287 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15288# endif
15289 if ( pCtx->eflags.Bits.u1IF
15290 && TRPMHasTrap(pVCpu)
15291 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15292 {
15293 uint8_t u8TrapNo;
15294 TRPMEVENT enmType;
15295 RTGCUINT uErrCode;
15296 RTGCPTR uCr2;
15297 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15298 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15299 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15300 TRPMResetTrap(pVCpu);
15301 }
15302
15303 /*
15304 * Initial decoder init w/ prefetch, then setup setjmp.
15305 */
15306 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15307 if (rcStrict == VINF_SUCCESS)
15308 {
15309# ifdef IEM_WITH_SETJMP
15310 jmp_buf JmpBuf;
15311 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
15312 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
15313 pVCpu->iem.s.cActiveMappings = 0;
15314 if ((rcStrict = setjmp(JmpBuf)) == 0)
15315# endif
15316 {
15317 /*
15318 * The run loop. We limit ourselves to 4096 instructions right now.
15319 */
15320 PVM pVM = pVCpu->CTX_SUFF(pVM);
15321 uint32_t cInstr = 4096;
15322 for (;;)
15323 {
15324 /*
15325 * Log the state.
15326 */
15327# ifdef LOG_ENABLED
15328 iemLogCurInstr(pVCpu, pCtx, true);
15329# endif
15330
15331 /*
15332 * Do the decoding and emulation.
15333 */
15334 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
15335 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
15336 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15337 {
15338 Assert(pVCpu->iem.s.cActiveMappings == 0);
15339 pVCpu->iem.s.cInstructions++;
15340 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
15341 {
15342 uint32_t fCpu = pVCpu->fLocalForcedActions
15343 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
15344 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
15345 | VMCPU_FF_TLB_FLUSH
15346# ifdef VBOX_WITH_RAW_MODE
15347 | VMCPU_FF_TRPM_SYNC_IDT
15348 | VMCPU_FF_SELM_SYNC_TSS
15349 | VMCPU_FF_SELM_SYNC_GDT
15350 | VMCPU_FF_SELM_SYNC_LDT
15351# endif
15352 | VMCPU_FF_INHIBIT_INTERRUPTS
15353 | VMCPU_FF_BLOCK_NMIS
15354 | VMCPU_FF_UNHALT ));
15355
15356 if (RT_LIKELY( ( !fCpu
15357 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
15358 && !pCtx->rflags.Bits.u1IF) )
15359 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
15360 {
15361 if (cInstr-- > 0)
15362 {
15363 Assert(pVCpu->iem.s.cActiveMappings == 0);
15364 iemReInitDecoder(pVCpu);
15365 continue;
15366 }
15367 }
15368 }
15369 Assert(pVCpu->iem.s.cActiveMappings == 0);
15370 }
15371 else if (pVCpu->iem.s.cActiveMappings > 0)
15372 iemMemRollback(pVCpu);
15373 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15374 break;
15375 }
15376 }
15377# ifdef IEM_WITH_SETJMP
15378 else
15379 {
15380 if (pVCpu->iem.s.cActiveMappings > 0)
15381 iemMemRollback(pVCpu);
15382 pVCpu->iem.s.cLongJumps++;
15383 }
15384 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
15385# endif
15386
15387 /*
15388 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
15389 */
15390 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
15391 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
15392# if defined(IEM_VERIFICATION_MODE_FULL)
15393 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
15394 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
15395 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
15396 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
15397# endif
15398 }
15399
15400 /*
15401 * Maybe re-enter raw-mode and log.
15402 */
15403# ifdef IN_RC
15404 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15405# endif
15406 if (rcStrict != VINF_SUCCESS)
15407 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15408 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15409 if (pcInstructions)
15410 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15411 return rcStrict;
15412#endif /* Not verification mode */
15413}
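
/*
 * Usage sketch (illustrative only): a run-loop caller can use the optional
 * instruction counter for statistics or scheduling decisions.
 *
 *      uint32_t cInstructions = 0;
 *      VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
 *      Log(("Interpreted %u instructions, rcStrict=%Rrc\n",
 *           cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
 */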
15414
15415
15416
15417/**
15418 * Injects a trap, fault, abort, software interrupt or external interrupt.
15419 *
15420 * The parameter list matches TRPMQueryTrapAll pretty closely.
15421 *
15422 * @returns Strict VBox status code.
15423 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15424 * @param u8TrapNo The trap number.
15425 * @param enmType What type is it (trap/fault/abort), software
15426 * interrupt or hardware interrupt.
15427 * @param uErrCode The error code if applicable.
15428 * @param uCr2 The CR2 value if applicable.
15429 * @param cbInstr The instruction length (only relevant for
15430 * software interrupts).
15431 */
15432VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
15433 uint8_t cbInstr)
15434{
15435 iemInitDecoder(pVCpu, false);
15436#ifdef DBGFTRACE_ENABLED
15437 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
15438 u8TrapNo, enmType, uErrCode, uCr2);
15439#endif
15440
15441 uint32_t fFlags;
15442 switch (enmType)
15443 {
15444 case TRPM_HARDWARE_INT:
15445 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
15446 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
15447 uErrCode = uCr2 = 0;
15448 break;
15449
15450 case TRPM_SOFTWARE_INT:
15451 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
15452 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
15453 uErrCode = uCr2 = 0;
15454 break;
15455
15456 case TRPM_TRAP:
15457 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
15458 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
15459 if (u8TrapNo == X86_XCPT_PF)
15460 fFlags |= IEM_XCPT_FLAGS_CR2;
15461 switch (u8TrapNo)
15462 {
15463 case X86_XCPT_DF:
15464 case X86_XCPT_TS:
15465 case X86_XCPT_NP:
15466 case X86_XCPT_SS:
15467 case X86_XCPT_PF:
15468 case X86_XCPT_AC:
15469 fFlags |= IEM_XCPT_FLAGS_ERR;
15470 break;
15471
15472 case X86_XCPT_NMI:
15473 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
15474 break;
15475 }
15476 break;
15477
15478 IEM_NOT_REACHED_DEFAULT_CASE_RET();
15479 }
15480
15481 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
15482}
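
/*
 * Usage sketch (illustrative only): injecting an external hardware interrupt
 * whose vector (u8Vector, an assumption supplied by the caller) was obtained
 * from the virtual interrupt controller.  Error code, CR2 and instruction
 * length are ignored for this event type, so zeros will do.
 *
 *      VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8Vector, TRPM_HARDWARE_INT, 0, 0, 0);
 */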
15483
15484
15485/**
15486 * Injects the active TRPM event.
15487 *
15488 * @returns Strict VBox status code.
15489 * @param pVCpu The cross context virtual CPU structure.
15490 */
15491VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
15492{
15493#ifndef IEM_IMPLEMENTS_TASKSWITCH
15494 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
15495#else
15496 uint8_t u8TrapNo;
15497 TRPMEVENT enmType;
15498 RTGCUINT uErrCode;
15499 RTGCUINTPTR uCr2;
15500 uint8_t cbInstr;
15501 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
15502 if (RT_FAILURE(rc))
15503 return rc;
15504
15505 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
15506
15507 /** @todo Are there any other codes that imply the event was successfully
15508 * delivered to the guest? See @bugref{6607}. */
15509 if ( rcStrict == VINF_SUCCESS
15510 || rcStrict == VINF_IEM_RAISED_XCPT)
15511 {
15512 TRPMResetTrap(pVCpu);
15513 }
15514 return rcStrict;
15515#endif
15516}
15517
15518
15519VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
15520{
15521 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15522 return VERR_NOT_IMPLEMENTED;
15523}
15524
15525
15526VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
15527{
15528 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15529 return VERR_NOT_IMPLEMENTED;
15530}
15531
15532
15533#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
15534/**
 15535 * Executes an IRET instruction with the default operand size.
15536 *
15537 * This is for PATM.
15538 *
15539 * @returns VBox status code.
15540 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15541 * @param pCtxCore The register frame.
15542 */
15543VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
15544{
15545 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15546
15547 iemCtxCoreToCtx(pCtx, pCtxCore);
15548 iemInitDecoder(pVCpu);
15549 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
15550 if (rcStrict == VINF_SUCCESS)
15551 iemCtxToCtxCore(pCtxCore, pCtx);
15552 else
15553 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15554 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15555 return rcStrict;
15556}
15557#endif
15558
15559
15560/**
 15561 * Macro used by the IEMExec* methods to check the given instruction length.
15562 *
15563 * Will return on failure!
15564 *
15565 * @param a_cbInstr The given instruction length.
15566 * @param a_cbMin The minimum length.
15567 */
15568#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
15569 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
15570 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
15571
15572
15573/**
15574 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
15575 *
15576 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
15577 *
15578 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
15579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
15580 * @param rcStrict The status code to fiddle.
15581 */
15582DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15583{
15584 iemUninitExec(pVCpu);
15585#ifdef IN_RC
15586 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
15587 iemExecStatusCodeFiddling(pVCpu, rcStrict));
15588#else
15589 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15590#endif
15591}
15592
15593
15594/**
15595 * Interface for HM and EM for executing string I/O OUT (write) instructions.
15596 *
15597 * This API ASSUMES that the caller has already verified that the guest code is
15598 * allowed to access the I/O port. (The I/O port is in the DX register in the
15599 * guest state.)
15600 *
15601 * @returns Strict VBox status code.
15602 * @param pVCpu The cross context virtual CPU structure.
15603 * @param cbValue The size of the I/O port access (1, 2, or 4).
15604 * @param enmAddrMode The addressing mode.
15605 * @param fRepPrefix Indicates whether a repeat prefix is used
15606 * (doesn't matter which for this instruction).
15607 * @param cbInstr The instruction length in bytes.
 15608 * @param iEffSeg The effective segment register number (X86_SREG_XXX).
15609 * @param fIoChecked Whether the access to the I/O port has been
15610 * checked or not. It's typically checked in the
15611 * HM scenario.
15612 */
15613VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15614 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
15615{
15616 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
15617 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15618
15619 /*
15620 * State init.
15621 */
15622 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15623
15624 /*
15625 * Switch orgy for getting to the right handler.
15626 */
15627 VBOXSTRICTRC rcStrict;
15628 if (fRepPrefix)
15629 {
15630 switch (enmAddrMode)
15631 {
15632 case IEMMODE_16BIT:
15633 switch (cbValue)
15634 {
15635 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15636 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15637 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15638 default:
15639 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15640 }
15641 break;
15642
15643 case IEMMODE_32BIT:
15644 switch (cbValue)
15645 {
15646 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15647 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15648 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15649 default:
15650 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15651 }
15652 break;
15653
15654 case IEMMODE_64BIT:
15655 switch (cbValue)
15656 {
15657 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15658 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15659 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15660 default:
15661 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15662 }
15663 break;
15664
15665 default:
15666 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15667 }
15668 }
15669 else
15670 {
15671 switch (enmAddrMode)
15672 {
15673 case IEMMODE_16BIT:
15674 switch (cbValue)
15675 {
15676 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15677 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15678 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15679 default:
15680 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15681 }
15682 break;
15683
15684 case IEMMODE_32BIT:
15685 switch (cbValue)
15686 {
15687 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15688 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15689 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15690 default:
15691 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15692 }
15693 break;
15694
15695 case IEMMODE_64BIT:
15696 switch (cbValue)
15697 {
15698 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15699 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15700 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15701 default:
15702 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15703 }
15704 break;
15705
15706 default:
15707 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15708 }
15709 }
15710
15711 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15712}
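
/*
 * Usage sketch (illustrative only): forwarding a byte-sized "rep outsb" with
 * 32-bit addressing, DS as the effective segment and the I/O port access
 * already validated by the caller.  cbInstr is assumed to come from the
 * caller's exit information.
 *
 *      // cbValue=1, rep prefix present, port access checked:
 *      VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1, IEMMODE_32BIT,
 *                                                   true, cbInstr, X86_SREG_DS, true);
 */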
15713
15714
15715/**
15716 * Interface for HM and EM for executing string I/O IN (read) instructions.
15717 *
15718 * This API ASSUMES that the caller has already verified that the guest code is
15719 * allowed to access the I/O port. (The I/O port is in the DX register in the
15720 * guest state.)
15721 *
15722 * @returns Strict VBox status code.
15723 * @param pVCpu The cross context virtual CPU structure.
15724 * @param cbValue The size of the I/O port access (1, 2, or 4).
15725 * @param enmAddrMode The addressing mode.
15726 * @param fRepPrefix Indicates whether a repeat prefix is used
15727 * (doesn't matter which for this instruction).
15728 * @param cbInstr The instruction length in bytes.
15729 * @param fIoChecked Whether the access to the I/O port has been
15730 * checked or not. It's typically checked in the
15731 * HM scenario.
15732 */
15733VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15734 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15735{
15736 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15737
15738 /*
15739 * State init.
15740 */
15741 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15742
15743 /*
15744 * Switch orgy for getting to the right handler.
15745 */
15746 VBOXSTRICTRC rcStrict;
15747 if (fRepPrefix)
15748 {
15749 switch (enmAddrMode)
15750 {
15751 case IEMMODE_16BIT:
15752 switch (cbValue)
15753 {
15754 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15755 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15756 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15757 default:
15758 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15759 }
15760 break;
15761
15762 case IEMMODE_32BIT:
15763 switch (cbValue)
15764 {
15765 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15766 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15767 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15768 default:
15769 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15770 }
15771 break;
15772
15773 case IEMMODE_64BIT:
15774 switch (cbValue)
15775 {
15776 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15777 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15778 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15779 default:
15780 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15781 }
15782 break;
15783
15784 default:
15785 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15786 }
15787 }
15788 else
15789 {
15790 switch (enmAddrMode)
15791 {
15792 case IEMMODE_16BIT:
15793 switch (cbValue)
15794 {
15795 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15796 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15797 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15798 default:
15799 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15800 }
15801 break;
15802
15803 case IEMMODE_32BIT:
15804 switch (cbValue)
15805 {
15806 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15807 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15808 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15809 default:
15810 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15811 }
15812 break;
15813
15814 case IEMMODE_64BIT:
15815 switch (cbValue)
15816 {
15817 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15818 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15819 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15820 default:
15821 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15822 }
15823 break;
15824
15825 default:
15826 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15827 }
15828 }
15829
15830 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15831}
15832
15833
15834/**
 15835 * Interface for rawmode to execute a decoded OUT instruction (port write).
15836 *
15837 * @returns Strict VBox status code.
15838 * @param pVCpu The cross context virtual CPU structure.
15839 * @param cbInstr The instruction length in bytes.
 15840 * @param u16Port The port to write to.
15841 * @param cbReg The register size.
15842 *
15843 * @remarks In ring-0 not all of the state needs to be synced in.
15844 */
15845VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15846{
15847 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15848 Assert(cbReg <= 4 && cbReg != 3);
15849
15850 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15851 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
15852 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15853}
15854
15855
15856/**
 15857 * Interface for rawmode to execute a decoded IN instruction (port read).
15858 *
15859 * @returns Strict VBox status code.
15860 * @param pVCpu The cross context virtual CPU structure.
15861 * @param cbInstr The instruction length in bytes.
15862 * @param u16Port The port to read.
15863 * @param cbReg The register size.
15864 */
15865VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15866{
15867 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15868 Assert(cbReg <= 4 && cbReg != 3);
15869
15870 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15871 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
15872 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15873}
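
/*
 * Usage sketch (illustrative only): a caller that has already decoded an
 * "out dx, al" or "in al, dx" just passes the port value and access size it
 * extracted; u16Port and cbInstr are assumptions supplied by that decoding.
 *
 *      // cbReg=1 for the AL-sized forms:
 *      VBOXSTRICTRC rcStrictOut = IEMExecDecodedOut(pVCpu, cbInstr, u16Port, 1);
 *      VBOXSTRICTRC rcStrictIn  = IEMExecDecodedIn(pVCpu, cbInstr, u16Port, 1);
 */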
15874
15875
15876/**
15877 * Interface for HM and EM to write to a CRx register.
15878 *
15879 * @returns Strict VBox status code.
15880 * @param pVCpu The cross context virtual CPU structure.
15881 * @param cbInstr The instruction length in bytes.
15882 * @param iCrReg The control register number (destination).
15883 * @param iGReg The general purpose register number (source).
15884 *
15885 * @remarks In ring-0 not all of the state needs to be synced in.
15886 */
15887VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15888{
15889 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15890 Assert(iCrReg < 16);
15891 Assert(iGReg < 16);
15892
15893 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15894 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15895 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15896}
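
/*
 * Usage sketch (illustrative only): handling an intercepted "mov cr3, rax"
 * once the exit handler has worked out the operands (register numbers shown
 * here purely for illustration).
 *
 *      // iCrReg=3 (CR3), iGReg=0 (RAX):
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, 3, 0);
 */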
15897
15898
15899/**
15900 * Interface for HM and EM to read from a CRx register.
15901 *
15902 * @returns Strict VBox status code.
15903 * @param pVCpu The cross context virtual CPU structure.
15904 * @param cbInstr The instruction length in bytes.
15905 * @param iGReg The general purpose register number (destination).
15906 * @param iCrReg The control register number (source).
15907 *
15908 * @remarks In ring-0 not all of the state needs to be synced in.
15909 */
15910VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15911{
15912 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15913 Assert(iCrReg < 16);
15914 Assert(iGReg < 16);
15915
15916 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15917 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15918 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15919}
15920
15921
15922/**
15923 * Interface for HM and EM to clear the CR0[TS] bit.
15924 *
15925 * @returns Strict VBox status code.
15926 * @param pVCpu The cross context virtual CPU structure.
15927 * @param cbInstr The instruction length in bytes.
15928 *
15929 * @remarks In ring-0 not all of the state needs to be synced in.
15930 */
15931VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15932{
15933 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15934
15935 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15936 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15937 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15938}
15939
15940
15941/**
15942 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15943 *
15944 * @returns Strict VBox status code.
15945 * @param pVCpu The cross context virtual CPU structure.
15946 * @param cbInstr The instruction length in bytes.
15947 * @param uValue The value to load into CR0.
15948 *
15949 * @remarks In ring-0 not all of the state needs to be synced in.
15950 */
15951VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
15952{
15953 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15954
15955 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15956 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
15957 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15958}
15959
15960
15961/**
15962 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15963 *
15964 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15965 *
15966 * @returns Strict VBox status code.
15967 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15968 * @param cbInstr The instruction length in bytes.
15969 * @remarks In ring-0 not all of the state needs to be synced in.
15970 * @thread EMT(pVCpu)
15971 */
15972VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15973{
15974 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15975
15976 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15977 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15978 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15979}
15980
15981
15982/**
15983 * Checks if IEM is in the process of delivering an event (interrupt or
15984 * exception).
15985 *
15986 * @returns true if we're in the process of raising an interrupt or exception,
15987 * false otherwise.
15988 * @param pVCpu The cross context virtual CPU structure.
15989 * @param puVector Where to store the vector associated with the
15990 * currently delivered event, optional.
 15991 * @param pfFlags Where to store the event delivery flags (see
15992 * IEM_XCPT_FLAGS_XXX), optional.
15993 * @param puErr Where to store the error code associated with the
15994 * event, optional.
15995 * @param puCr2 Where to store the CR2 associated with the event,
15996 * optional.
15997 * @remarks The caller should check the flags to determine if the error code and
15998 * CR2 are valid for the event.
15999 */
16000VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
16001{
16002 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
16003 if (fRaisingXcpt)
16004 {
16005 if (puVector)
16006 *puVector = pVCpu->iem.s.uCurXcpt;
16007 if (pfFlags)
16008 *pfFlags = pVCpu->iem.s.fCurXcpt;
16009 if (puErr)
16010 *puErr = pVCpu->iem.s.uCurXcptErr;
16011 if (puCr2)
16012 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
16013 }
16014 return fRaisingXcpt;
16015}
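
/*
 * Usage sketch (illustrative only): as the remark above says, the flags decide
 * whether the error code and CR2 values are meaningful.
 *
 *      uint8_t uVector; uint32_t fFlags; uint32_t uErr; uint64_t uCr2;
 *      if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *          Log(("Delivering vector %#x, error code %s, CR2 %s\n", uVector,
 *               (fFlags & IEM_XCPT_FLAGS_ERR) ? "valid" : "n/a",
 *               (fFlags & IEM_XCPT_FLAGS_CR2) ? "valid" : "n/a"));
 */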
16016
16017
16018#ifdef VBOX_WITH_NESTED_HWVIRT
16019/**
 16020 * Interface for HM and EM to emulate the CLGI instruction.
16021 *
16022 * @returns Strict VBox status code.
16023 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16024 * @param cbInstr The instruction length in bytes.
16025 * @thread EMT(pVCpu)
16026 */
16027VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
16028{
16029 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16030
16031 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16032 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
16033 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16034}
16035
16036
16037/**
16038 * Interface for HM and EM to emulate the STGI instruction.
16039 *
16040 * @returns Strict VBox status code.
16041 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16042 * @param cbInstr The instruction length in bytes.
16043 * @thread EMT(pVCpu)
16044 */
16045VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
16046{
16047 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16048
16049 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16050 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
16051 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16052}
16053
16054
16055/**
16056 * Interface for HM and EM to emulate the VMLOAD instruction.
16057 *
16058 * @returns Strict VBox status code.
16059 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16060 * @param cbInstr The instruction length in bytes.
16061 * @thread EMT(pVCpu)
16062 */
16063VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
16064{
16065 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16066
16067 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16068 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
16069 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16070}
16071
16072
16073/**
16074 * Interface for HM and EM to emulate the VMSAVE instruction.
16075 *
16076 * @returns Strict VBox status code.
16077 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16078 * @param cbInstr The instruction length in bytes.
16079 * @thread EMT(pVCpu)
16080 */
16081VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
16082{
16083 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16084
16085 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16086 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
16087 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16088}
16089
16090
16091/**
16092 * Interface for HM and EM to emulate the INVLPGA instruction.
16093 *
16094 * @returns Strict VBox status code.
16095 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16096 * @param cbInstr The instruction length in bytes.
16097 * @thread EMT(pVCpu)
16098 */
16099VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
16100{
16101 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16102
16103 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16104 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
16105 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16106}
16107#endif /* VBOX_WITH_NESTED_HWVIRT */
16108
16109#ifdef IN_RING3
16110
16111/**
16112 * Handles the unlikely and probably fatal merge cases.
16113 *
16114 * @returns Merged status code.
16115 * @param rcStrict Current EM status code.
16116 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16117 * with @a rcStrict.
16118 * @param iMemMap The memory mapping index. For error reporting only.
16119 * @param pVCpu The cross context virtual CPU structure of the calling
16120 * thread, for error reporting only.
16121 */
16122DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
16123 unsigned iMemMap, PVMCPU pVCpu)
16124{
16125 if (RT_FAILURE_NP(rcStrict))
16126 return rcStrict;
16127
16128 if (RT_FAILURE_NP(rcStrictCommit))
16129 return rcStrictCommit;
16130
16131 if (rcStrict == rcStrictCommit)
16132 return rcStrictCommit;
16133
16134 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
16135 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
16136 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
16137 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
16138 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
16139 return VERR_IOM_FF_STATUS_IPE;
16140}
16141
16142
16143/**
16144 * Helper for IOMR3ProcessForceFlag.
16145 *
16146 * @returns Merged status code.
16147 * @param rcStrict Current EM status code.
16148 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16149 * with @a rcStrict.
16150 * @param iMemMap The memory mapping index. For error reporting only.
16151 * @param pVCpu The cross context virtual CPU structure of the calling
16152 * thread, for error reporting only.
16153 */
16154DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
16155{
16156 /* Simple. */
16157 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
16158 return rcStrictCommit;
16159
16160 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
16161 return rcStrict;
16162
16163 /* EM scheduling status codes. */
16164 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
16165 && rcStrict <= VINF_EM_LAST))
16166 {
16167 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
16168 && rcStrictCommit <= VINF_EM_LAST))
16169 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
16170 }
16171
16172 /* Unlikely */
16173 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
16174}
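
/*
 * Note on the merge rules above: when the current status is neutral
 * (VINF_SUCCESS or VINF_EM_RAW_TO_R3) the commit status wins outright; when
 * the commit itself went fine (VINF_SUCCESS) the current status is kept; and
 * when both carry EM scheduling codes the numerically lower one is returned,
 * i.e. lower EM status values are treated as the more urgent request.
 * Everything else ends up in iemR3MergeStatusSlow.
 */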
16175
16176
16177/**
16178 * Called by force-flag handling code when VMCPU_FF_IEM is set.
16179 *
16180 * @returns Merge between @a rcStrict and what the commit operation returned.
16181 * @param pVM The cross context VM structure.
16182 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16183 * @param rcStrict The status code returned by ring-0 or raw-mode.
16184 */
16185VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
16186{
16187 /*
16188 * Reset the pending commit.
16189 */
16190 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
16191 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
16192 ("%#x %#x %#x\n",
16193 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16194 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
16195
16196 /*
16197 * Commit the pending bounce buffers (usually just one).
16198 */
16199 unsigned cBufs = 0;
16200 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
16201 while (iMemMap-- > 0)
16202 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
16203 {
16204 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
16205 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
16206 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
16207
16208 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
16209 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
16210 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
16211
16212 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
16213 {
16214 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
16215 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16216 pbBuf,
16217 cbFirst,
16218 PGMACCESSORIGIN_IEM);
16219 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16220 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16221 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16222 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16223 }
16224
16225 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16226 {
16227 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16228 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16229 pbBuf + cbFirst,
16230 cbSecond,
16231 PGMACCESSORIGIN_IEM);
16232 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16233 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16234 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16235 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16236 }
16237 cBufs++;
16238 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16239 }
16240
16241 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16242 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16243 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16244 pVCpu->iem.s.cActiveMappings = 0;
16245 return rcStrict;
16246}
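
/*
 * Usage sketch (illustrative only): ring-3 force-flag processing as a caller
 * might do it after getting back from ring-0 or raw-mode, where rcStrict is
 * whatever status the other context returned.
 *
 *      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *          rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 */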
16247
16248#endif /* IN_RING3 */
16249