VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 66604

Last change on this file since 66604 was 66604, checked in by vboxsync, 8 years ago

VMM: Nested Hw.virt: SVM bits.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 599.8 KB
1/* $Id: IEMAll.cpp 66604 2017-04-19 06:44:36Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#ifdef VBOX_WITH_NESTED_HWVIRT
105# include <VBox/vmm/hm_svm.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#ifdef VBOX_WITH_RAW_MODE_NOT_R0
111# include <VBox/vmm/patm.h>
112# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
113# include <VBox/vmm/csam.h>
114# endif
115#endif
116#include "IEMInternal.h"
117#ifdef IEM_VERIFICATION_MODE_FULL
118# include <VBox/vmm/rem.h>
119# include <VBox/vmm/mm.h>
120#endif
121#include <VBox/vmm/vm.h>
122#include <VBox/log.h>
123#include <VBox/err.h>
124#include <VBox/param.h>
125#include <VBox/dis.h>
126#include <VBox/disopcode.h>
127#include <iprt/assert.h>
128#include <iprt/string.h>
129#include <iprt/x86.h>
130
131
132/*********************************************************************************************************************************
133* Structures and Typedefs *
134*********************************************************************************************************************************/
135/** @typedef PFNIEMOP
136 * Pointer to an opcode decoder function.
137 */
138
139/** @def FNIEMOP_DEF
140 * Define an opcode decoder function.
141 *
142 * We're using macros for this so that adding and removing parameters as well as
143 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
144 *
145 * @param a_Name The function name.
146 */
147
148/** @typedef PFNIEMOPRM
149 * Pointer to an opcode decoder function with RM byte.
150 */
151
152/** @def FNIEMOPRM_DEF
153 * Define an opcode decoder function with RM byte.
154 *
156 * We're using macros for this so that adding and removing parameters as well as
156 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL_1
157 *
158 * @param a_Name The function name.
159 */
160
161#if defined(__GNUC__) && defined(RT_ARCH_X86)
162typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
163typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
164# define FNIEMOP_DEF(a_Name) \
165 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
166# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
167 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
168# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
169 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
170
171#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
172typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
173typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
174# define FNIEMOP_DEF(a_Name) \
175 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
176# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
177 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
178# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
179 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
180
181#elif defined(__GNUC__)
182typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
183typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
184# define FNIEMOP_DEF(a_Name) \
185 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
186# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
187 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
188# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
189 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
190
191#else
192typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
193typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
194# define FNIEMOP_DEF(a_Name) \
195 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
196# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
197 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
198# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
199 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
200
201#endif
202#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
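/*
 * Illustrative sketch of how the FNIEMOP_DEF / FNIEMOP_CALL pair is meant to be used
 * (the handler name is hypothetical; both macros assume a 'pVCpu' variable in scope,
 * as in the decoder loop):
 *
 *      FNIEMOP_DEF(iemOp_example)
 *      {
 *          // decode operands here and dispatch to the instruction implementation
 *          return VINF_SUCCESS;
 *      }
 *
 *      // from the decoder loop, given an opcode byte 'b':
 *      VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
 */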
203
204
205/**
206 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
207 */
208typedef union IEMSELDESC
209{
210 /** The legacy view. */
211 X86DESC Legacy;
212 /** The long mode view. */
213 X86DESC64 Long;
214} IEMSELDESC;
215/** Pointer to a selector descriptor table entry. */
216typedef IEMSELDESC *PIEMSELDESC;
217
218
219/*********************************************************************************************************************************
220* Defined Constants And Macros *
221*********************************************************************************************************************************/
222/** @def IEM_WITH_SETJMP
223 * Enables alternative status code handling using setjmps.
224 *
225 * This adds a bit of expense via the setjmp() call since it saves all the
226 * non-volatile registers. However, it eliminates return code checks and allows
227 * for more optimal return value passing (return regs instead of stack buffer).
228 */
229#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
230# define IEM_WITH_SETJMP
231#endif
232
233/** Temporary hack to disable the double execution. Will be removed in favor
234 * of a dedicated execution mode in EM. */
235//#define IEM_VERIFICATION_MODE_NO_REM
236
237/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
238 * due to GCC lacking knowledge about the value range of a switch. */
239#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
240
241/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
242#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
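/*
 * Illustrative usage sketch (the 'cbValue' variable is hypothetical): the default-case
 * helpers are meant to close exhaustive switches over enum values, e.g.:
 *
 *      switch (pVCpu->iem.s.enmEffOpSize)
 *      {
 *          case IEMMODE_16BIT: cbValue = 2; break;
 *          case IEMMODE_32BIT: cbValue = 4; break;
 *          case IEMMODE_64BIT: cbValue = 8; break;
 *          IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *      }
 */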
243
244/**
245 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
246 * occasion.
247 */
248#ifdef LOG_ENABLED
249# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
250 do { \
251 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
252 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
253 } while (0)
254#else
255# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
256 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
257#endif
258
259/**
260 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
261 * occasion using the supplied logger statement.
262 *
263 * @param a_LoggerArgs What to log on failure.
264 */
265#ifdef LOG_ENABLED
266# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
267 do { \
268 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
269 /*LogFunc(a_LoggerArgs);*/ \
270 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
271 } while (0)
272#else
273# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
274 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
275#endif
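/*
 * Illustrative usage sketch (the condition is hypothetical): instruction handlers that
 * hit an unimplemented corner case bail out with one of the two macros above, e.g.:
 *
 *      if (fUnhandledCase)
 *          IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("some-instr: unhandled case\n"));
 */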
276
277/**
278 * Call an opcode decoder function.
279 *
280 * We're using macros for this so that adding and removing parameters can be
281 * done as we please. See FNIEMOP_DEF.
282 */
283#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
284
285/**
286 * Call a common opcode decoder function taking one extra argument.
287 *
288 * We're using macros for this so that adding and removing parameters can be
289 * done as we please. See FNIEMOP_DEF_1.
290 */
291#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
292
293/**
294 * Call a common opcode decoder function taking two extra arguments.
295 *
296 * We're using macros for this so that adding and removing parameters can be
297 * done as we please. See FNIEMOP_DEF_2.
298 */
299#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
300
301/**
302 * Check if we're currently executing in real or virtual 8086 mode.
303 *
304 * @returns @c true if it is, @c false if not.
305 * @param a_pVCpu The IEM state of the current CPU.
306 */
307#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
308
309/**
310 * Check if we're currently executing in virtual 8086 mode.
311 *
312 * @returns @c true if it is, @c false if not.
313 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
314 */
315#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
316
317/**
318 * Check if we're currently executing in long mode.
319 *
320 * @returns @c true if it is, @c false if not.
321 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
322 */
323#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
324
325/**
326 * Check if we're currently executing in real mode.
327 *
328 * @returns @c true if it is, @c false if not.
329 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
330 */
331#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
332
333/**
334 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
335 * @returns PCCPUMFEATURES
336 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
337 */
338#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
339
340/**
341 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
342 * @returns PCCPUMFEATURES
343 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
344 */
345#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
346
347/**
348 * Evaluates to true if we're presenting an Intel CPU to the guest.
349 */
350#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
351
352/**
353 * Evaluates to true if we're presenting an AMD CPU to the guest.
354 */
355#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
356
357/**
358 * Check if the address is canonical.
359 */
360#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
361
362/** @def IEM_USE_UNALIGNED_DATA_ACCESS
363 * Use unaligned accesses instead of elaborate byte assembly. */
364#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
365# define IEM_USE_UNALIGNED_DATA_ACCESS
366#endif
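/*
 * Illustrative sketch (simplified, variable names hypothetical): with
 * IEM_USE_UNALIGNED_DATA_ACCESS defined, a data fetch helper can read the mapped bytes
 * directly, e.g.
 *
 *      *pu32Dst = *(uint32_t const *)pvSrc;
 *
 * whereas the fallback path assembles the value from individual bytes.
 */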
367
368#ifdef VBOX_WITH_NESTED_HWVIRT
369/**
370 * Check the common SVM instruction preconditions.
371 */
372# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
373 do { \
374 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
375 { \
376 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
377 return iemRaiseUndefinedOpcode(pVCpu); \
378 } \
379 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
380 { \
381 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
382 return iemRaiseUndefinedOpcode(pVCpu); \
383 } \
384 if (pVCpu->iem.s.uCpl != 0) \
385 { \
386 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
387 return iemRaiseGeneralProtectionFault0(pVCpu); \
388 } \
389 } while (0)
390
391/**
392 * Check if SVM is enabled.
393 */
394# define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
395
396/**
397 * Check if an SVM control/instruction intercept is set.
398 */
399# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(IEM_GET_CTX(a_pVCpu), (a_Intercept)))
400
401/**
402 * Check if an SVM read CRx intercept is set.
403 */
404# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
405
406/**
407 * Check if an SVM write CRx intercept is set.
408 */
409# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
410
411/**
412 * Check if an SVM read DRx intercept is set.
413 */
414# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
415
416/**
417 * Check if an SVM write DRx intercept is set.
418 */
419# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
420
421/**
422 * Check if an SVM exception intercept is set.
423 */
424# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (CPUMIsGuestSvmXcptInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uVector)))
425
426/**
427 * Invokes the SVM \#VMEXIT handler for the nested-guest.
428 */
429# define IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
430 do \
431 { \
432 VBOXSTRICTRC rcStrictVmExit = HMSvmNstGstVmExit((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_uExitCode), (a_uExitInfo1), \
433 (a_uExitInfo2)); \
434 return rcStrictVmExit == VINF_SVM_VMEXIT ? VINF_SUCCESS : rcStrictVmExit; \
435 } while (0)
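/*
 * Illustrative sketch (the intercept and exit-code constants are shown as examples):
 * an SVM instruction implementation would typically combine the macros above roughly
 * like this:
 *
 *      IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmrun);
 *      if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMRUN))
 *          IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMRUN, 0, 0);  // uExitInfo1, uExitInfo2
 */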
436
437/**
438 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
439 * corresponding decode assist information.
440 */
441# define IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
442 do \
443 { \
444 uint64_t uExitInfo1; \
445 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssist \
446 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
447 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
448 else \
449 uExitInfo1 = 0; \
450 IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
451 } while (0)
452
453/**
454 * Checks and handles an SVM MSR intercept.
455 */
456# define IEM_SVM_NST_GST_MSR_INTERCEPT(a_pVCpu, a_idMsr, a_fWrite) \
457 HMSvmNstGstHandleMsrIntercept((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_idMsr), (a_fWrite))
458
459#else
460# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) do { } while (0)
461# define IEM_IS_SVM_ENABLED(a_pVCpu) (false)
462# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
463# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
464# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
465# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
466# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
467# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
468# define IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
469# define IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
470# define IEM_SVM_NST_GST_MSR_INTERCEPT(a_pVCpu, a_idMsr, a_fWrite) (VERR_SVM_IPE_1)
471
472#endif /* VBOX_WITH_NESTED_HWVIRT */
473
474
475/*********************************************************************************************************************************
476* Global Variables *
477*********************************************************************************************************************************/
478extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
479
480
481/** Function table for the ADD instruction. */
482IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
483{
484 iemAImpl_add_u8, iemAImpl_add_u8_locked,
485 iemAImpl_add_u16, iemAImpl_add_u16_locked,
486 iemAImpl_add_u32, iemAImpl_add_u32_locked,
487 iemAImpl_add_u64, iemAImpl_add_u64_locked
488};
489
490/** Function table for the ADC instruction. */
491IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
492{
493 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
494 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
495 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
496 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
497};
498
499/** Function table for the SUB instruction. */
500IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
501{
502 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
503 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
504 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
505 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
506};
507
508/** Function table for the SBB instruction. */
509IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
510{
511 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
512 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
513 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
514 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
515};
516
517/** Function table for the OR instruction. */
518IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
519{
520 iemAImpl_or_u8, iemAImpl_or_u8_locked,
521 iemAImpl_or_u16, iemAImpl_or_u16_locked,
522 iemAImpl_or_u32, iemAImpl_or_u32_locked,
523 iemAImpl_or_u64, iemAImpl_or_u64_locked
524};
525
526/** Function table for the XOR instruction. */
527IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
528{
529 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
530 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
531 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
532 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
533};
534
535/** Function table for the AND instruction. */
536IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
537{
538 iemAImpl_and_u8, iemAImpl_and_u8_locked,
539 iemAImpl_and_u16, iemAImpl_and_u16_locked,
540 iemAImpl_and_u32, iemAImpl_and_u32_locked,
541 iemAImpl_and_u64, iemAImpl_and_u64_locked
542};
543
544/** Function table for the CMP instruction.
545 * @remarks Making operand order ASSUMPTIONS.
546 */
547IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
548{
549 iemAImpl_cmp_u8, NULL,
550 iemAImpl_cmp_u16, NULL,
551 iemAImpl_cmp_u32, NULL,
552 iemAImpl_cmp_u64, NULL
553};
554
555/** Function table for the TEST instruction.
556 * @remarks Making operand order ASSUMPTIONS.
557 */
558IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
559{
560 iemAImpl_test_u8, NULL,
561 iemAImpl_test_u16, NULL,
562 iemAImpl_test_u32, NULL,
563 iemAImpl_test_u64, NULL
564};
565
566/** Function table for the BT instruction. */
567IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
568{
569 NULL, NULL,
570 iemAImpl_bt_u16, NULL,
571 iemAImpl_bt_u32, NULL,
572 iemAImpl_bt_u64, NULL
573};
574
575/** Function table for the BTC instruction. */
576IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
577{
578 NULL, NULL,
579 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
580 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
581 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
582};
583
584/** Function table for the BTR instruction. */
585IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
586{
587 NULL, NULL,
588 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
589 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
590 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
591};
592
593/** Function table for the BTS instruction. */
594IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
595{
596 NULL, NULL,
597 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
598 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
599 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
600};
601
602/** Function table for the BSF instruction. */
603IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
604{
605 NULL, NULL,
606 iemAImpl_bsf_u16, NULL,
607 iemAImpl_bsf_u32, NULL,
608 iemAImpl_bsf_u64, NULL
609};
610
611/** Function table for the BSR instruction. */
612IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
613{
614 NULL, NULL,
615 iemAImpl_bsr_u16, NULL,
616 iemAImpl_bsr_u32, NULL,
617 iemAImpl_bsr_u64, NULL
618};
619
620/** Function table for the IMUL instruction. */
621IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
622{
623 NULL, NULL,
624 iemAImpl_imul_two_u16, NULL,
625 iemAImpl_imul_two_u32, NULL,
626 iemAImpl_imul_two_u64, NULL
627};
628
629/** Group 1 /r lookup table. */
630IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
631{
632 &g_iemAImpl_add,
633 &g_iemAImpl_or,
634 &g_iemAImpl_adc,
635 &g_iemAImpl_sbb,
636 &g_iemAImpl_and,
637 &g_iemAImpl_sub,
638 &g_iemAImpl_xor,
639 &g_iemAImpl_cmp
640};
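/*
 * Illustrative sketch: the group 1 decoders pick the implementation from this table
 * using the reg field of the ModR/M byte (bits 5:3), which is what fixes the
 * ADD/OR/ADC/SBB/AND/SUB/XOR/CMP ordering above:
 *
 *      PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
 */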
641
642/** Function table for the INC instruction. */
643IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
644{
645 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
646 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
647 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
648 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
649};
650
651/** Function table for the DEC instruction. */
652IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
653{
654 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
655 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
656 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
657 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
658};
659
660/** Function table for the NEG instruction. */
661IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
662{
663 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
664 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
665 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
666 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
667};
668
669/** Function table for the NOT instruction. */
670IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
671{
672 iemAImpl_not_u8, iemAImpl_not_u8_locked,
673 iemAImpl_not_u16, iemAImpl_not_u16_locked,
674 iemAImpl_not_u32, iemAImpl_not_u32_locked,
675 iemAImpl_not_u64, iemAImpl_not_u64_locked
676};
677
678
679/** Function table for the ROL instruction. */
680IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
681{
682 iemAImpl_rol_u8,
683 iemAImpl_rol_u16,
684 iemAImpl_rol_u32,
685 iemAImpl_rol_u64
686};
687
688/** Function table for the ROR instruction. */
689IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
690{
691 iemAImpl_ror_u8,
692 iemAImpl_ror_u16,
693 iemAImpl_ror_u32,
694 iemAImpl_ror_u64
695};
696
697/** Function table for the RCL instruction. */
698IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
699{
700 iemAImpl_rcl_u8,
701 iemAImpl_rcl_u16,
702 iemAImpl_rcl_u32,
703 iemAImpl_rcl_u64
704};
705
706/** Function table for the RCR instruction. */
707IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
708{
709 iemAImpl_rcr_u8,
710 iemAImpl_rcr_u16,
711 iemAImpl_rcr_u32,
712 iemAImpl_rcr_u64
713};
714
715/** Function table for the SHL instruction. */
716IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
717{
718 iemAImpl_shl_u8,
719 iemAImpl_shl_u16,
720 iemAImpl_shl_u32,
721 iemAImpl_shl_u64
722};
723
724/** Function table for the SHR instruction. */
725IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
726{
727 iemAImpl_shr_u8,
728 iemAImpl_shr_u16,
729 iemAImpl_shr_u32,
730 iemAImpl_shr_u64
731};
732
733/** Function table for the SAR instruction. */
734IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
735{
736 iemAImpl_sar_u8,
737 iemAImpl_sar_u16,
738 iemAImpl_sar_u32,
739 iemAImpl_sar_u64
740};
741
742
743/** Function table for the MUL instruction. */
744IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
745{
746 iemAImpl_mul_u8,
747 iemAImpl_mul_u16,
748 iemAImpl_mul_u32,
749 iemAImpl_mul_u64
750};
751
752/** Function table for the IMUL instruction working implicitly on rAX. */
753IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
754{
755 iemAImpl_imul_u8,
756 iemAImpl_imul_u16,
757 iemAImpl_imul_u32,
758 iemAImpl_imul_u64
759};
760
761/** Function table for the DIV instruction. */
762IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
763{
764 iemAImpl_div_u8,
765 iemAImpl_div_u16,
766 iemAImpl_div_u32,
767 iemAImpl_div_u64
768};
769
770/** Function table for the IDIV instruction. */
771IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
772{
773 iemAImpl_idiv_u8,
774 iemAImpl_idiv_u16,
775 iemAImpl_idiv_u32,
776 iemAImpl_idiv_u64
777};
778
779/** Function table for the SHLD instruction */
780IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
781{
782 iemAImpl_shld_u16,
783 iemAImpl_shld_u32,
784 iemAImpl_shld_u64,
785};
786
787/** Function table for the SHRD instruction */
788IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
789{
790 iemAImpl_shrd_u16,
791 iemAImpl_shrd_u32,
792 iemAImpl_shrd_u64,
793};
794
795
796/** Function table for the PUNPCKLBW instruction */
797IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
798/** Function table for the PUNPCKLWD instruction */
799IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
800/** Function table for the PUNPCKLDQ instruction */
801IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
802/** Function table for the PUNPCKLQDQ instruction */
803IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
804
805/** Function table for the PUNPCKHBW instruction */
806IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
807/** Function table for the PUNPCKHWD instruction */
808IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
809/** Function table for the PUNPCKHDQ instruction */
810IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
811/** Function table for the PUNPCKHQDQ instruction */
812IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
813
814/** Function table for the PXOR instruction */
815IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
816/** Function table for the PCMPEQB instruction */
817IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
818/** Function table for the PCMPEQW instruction */
819IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
820/** Function table for the PCMPEQD instruction */
821IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
822
823
824#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
825/** What IEM just wrote. */
826uint8_t g_abIemWrote[256];
827/** How much IEM just wrote. */
828size_t g_cbIemWrote;
829#endif
830
831
832/*********************************************************************************************************************************
833* Internal Functions *
834*********************************************************************************************************************************/
835IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
836IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
837IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
838IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
839/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
840IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
841IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
842IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
843IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
844IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
845IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
846IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
847IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
848IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
849IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
850IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
851IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
852#ifdef IEM_WITH_SETJMP
853DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
854DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
855DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
856DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
857DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
858#endif
859
860IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
861IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
862IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
863IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
864IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
865IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
866IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
867IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
868IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
869IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
870IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
871IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
872IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
873IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
874IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
875IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
876
877#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
878IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
879#endif
880IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
881IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
882
883#ifdef VBOX_WITH_NESTED_HWVIRT
884/**
885 * Checks if the intercepted IO instruction causes a \#VMEXIT and handles it
886 * accordingly.
887 *
888 * @returns VBox strict status code.
889 * @param pVCpu The cross context virtual CPU structure of the calling thread.
890 * @param u16Port The IO port being accessed.
891 * @param enmIoType The type of IO access.
892 * @param cbReg The IO operand size in bytes.
893 * @param cAddrSizeBits The address size in bits (16, 32 or 64).
894 * @param iEffSeg The effective segment number.
895 * @param fRep Whether this is a repeating IO instruction (REP prefix).
896 * @param fStrIo Whether this is a string IO instruction.
897 * @param cbInstr The length of the IO instruction in bytes.
898 *
899 * @remarks This must be called only when IO instructions are intercepted by the
900 * nested-guest hypervisor.
901 */
902IEM_STATIC VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPU pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
903 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr)
904{
905 Assert(IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT));
906 Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
907 Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);
908
909 static const uint32_t s_auIoOpSize[] = { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };
910 static const uint32_t s_auIoAddrSize[] = { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };
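 /* Note: the tables are indexed below as s_auIoOpSize[cbReg & 7], so the 1/2/4 byte
    operand sizes land on the 8/16/32-bit flags, and as
    s_auIoAddrSize[(cAddrSizeBits >> 4) & 7], i.e. 16>>4=1, 32>>4=2, 64>>4=4 -- which is
    why only those slots are populated. */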
911
912 SVMIOIOEXITINFO IoExitInfo;
913 IoExitInfo.u = s_auIoOpSize[cbReg & 7];
914 IoExitInfo.u |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
915 IoExitInfo.n.u1STR = fStrIo;
916 IoExitInfo.n.u1REP = fRep;
917 IoExitInfo.n.u3SEG = iEffSeg & 0x7;
918 IoExitInfo.n.u1Type = enmIoType;
919 IoExitInfo.n.u16Port = u16Port;
920
921 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
922 return HMSvmNstGstHandleIOIntercept(pVCpu, pCtx, &IoExitInfo, pCtx->rip + cbInstr);
923}
924
925#else
926IEM_STATIC VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPU pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
927 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr)
928{
929 RT_NOREF9(pVCpu, u16Port, enmIoType, cbReg, cAddrSizeBits, iEffSeg, fRep, fStrIo, cbInstr);
930 return VERR_IEM_IPE_9;
931}
932#endif /* VBOX_WITH_NESTED_HWVIRT */
933
934
935/**
936 * Sets the pass up status.
937 *
938 * @returns VINF_SUCCESS.
939 * @param pVCpu The cross context virtual CPU structure of the
940 * calling thread.
941 * @param rcPassUp The pass up status. Must be informational.
942 * VINF_SUCCESS is not allowed.
943 */
944IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
945{
946 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
947
948 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
949 if (rcOldPassUp == VINF_SUCCESS)
950 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
951 /* If both are EM scheduling codes, use EM priority rules. */
952 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
953 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
954 {
955 if (rcPassUp < rcOldPassUp)
956 {
957 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
958 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
959 }
960 else
961 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
962 }
963 /* Override EM scheduling with specific status code. */
964 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
965 {
966 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
967 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
968 }
969 /* Don't override specific status code, first come first served. */
970 else
971 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
972 return VINF_SUCCESS;
973}
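/*
 * Illustrative usage sketch: when an access handler returns an informational status
 * that IEM wants to remember rather than abort on, callers do roughly
 *
 *      if (rcStrict != VINF_SUCCESS)
 *          rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
 *
 * (see the opcode prefetch code further down for a real instance).
 */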
974
975
976/**
977 * Calculates the CPU mode.
978 *
979 * This is mainly for updating IEMCPU::enmCpuMode.
980 *
981 * @returns CPU mode.
982 * @param pCtx The register context for the CPU.
983 */
984DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
985{
986 if (CPUMIsGuestIn64BitCodeEx(pCtx))
987 return IEMMODE_64BIT;
988 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
989 return IEMMODE_32BIT;
990 return IEMMODE_16BIT;
991}
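/*
 * Clarifying note: this resolves to 64-bit mode when the guest is executing 64-bit
 * code (long mode with CS.L set), and otherwise lets CS.D (the default-big attribute
 * tested above) pick between 32-bit and 16-bit mode.
 */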
992
993
994/**
995 * Initializes the execution state.
996 *
997 * @param pVCpu The cross context virtual CPU structure of the
998 * calling thread.
999 * @param fBypassHandlers Whether to bypass access handlers.
1000 *
1001 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1002 * side-effects in strict builds.
1003 */
1004DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
1005{
1006 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1007
1008 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1009
1010#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1011 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1012 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1013 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1014 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1015 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1016 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1017 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1018 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1019#endif
1020
1021#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1022 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1023#endif
1024 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1025 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
1026#ifdef VBOX_STRICT
1027 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1028 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1029 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1030 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1031 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1032 pVCpu->iem.s.uRexReg = 127;
1033 pVCpu->iem.s.uRexB = 127;
1034 pVCpu->iem.s.uRexIndex = 127;
1035 pVCpu->iem.s.iEffSeg = 127;
1036 pVCpu->iem.s.idxPrefix = 127;
1037 pVCpu->iem.s.uVex3rdReg = 127;
1038 pVCpu->iem.s.uVexLength = 127;
1039 pVCpu->iem.s.fEvexStuff = 127;
1040 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1041# ifdef IEM_WITH_CODE_TLB
1042 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1043 pVCpu->iem.s.pbInstrBuf = NULL;
1044 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1045 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1046 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1047 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1048# else
1049 pVCpu->iem.s.offOpcode = 127;
1050 pVCpu->iem.s.cbOpcode = 127;
1051# endif
1052#endif
1053
1054 pVCpu->iem.s.cActiveMappings = 0;
1055 pVCpu->iem.s.iNextMapping = 0;
1056 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1057 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1058#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1059 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1060 && pCtx->cs.u64Base == 0
1061 && pCtx->cs.u32Limit == UINT32_MAX
1062 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1063 if (!pVCpu->iem.s.fInPatchCode)
1064 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1065#endif
1066
1067#ifdef IEM_VERIFICATION_MODE_FULL
1068 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
1069 pVCpu->iem.s.fNoRem = true;
1070#endif
1071}
1072
1073
1074/**
1075 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1076 *
1077 * @param pVCpu The cross context virtual CPU structure of the
1078 * calling thread.
1079 */
1080DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1081{
1082 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1083#ifdef IEM_VERIFICATION_MODE_FULL
1084 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
1085#endif
1086#ifdef VBOX_STRICT
1087# ifdef IEM_WITH_CODE_TLB
1088 NOREF(pVCpu);
1089# else
1090 pVCpu->iem.s.cbOpcode = 0;
1091# endif
1092#else
1093 NOREF(pVCpu);
1094#endif
1095}
1096
1097
1098/**
1099 * Initializes the decoder state.
1100 *
1101 * iemReInitDecoder is mostly a copy of this function.
1102 *
1103 * @param pVCpu The cross context virtual CPU structure of the
1104 * calling thread.
1105 * @param fBypassHandlers Whether to bypass access handlers.
1106 */
1107DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1108{
1109 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1110
1111 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1112
1113#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1114 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1115 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1116 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1117 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1118 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1119 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1120 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1121 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1122#endif
1123
1124#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1125 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1126#endif
1127 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1128#ifdef IEM_VERIFICATION_MODE_FULL
1129 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1130 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1131#endif
1132 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1133 pVCpu->iem.s.enmCpuMode = enmMode;
1134 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1135 pVCpu->iem.s.enmEffAddrMode = enmMode;
1136 if (enmMode != IEMMODE_64BIT)
1137 {
1138 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1139 pVCpu->iem.s.enmEffOpSize = enmMode;
1140 }
1141 else
1142 {
1143 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1144 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1145 }
1146 pVCpu->iem.s.fPrefixes = 0;
1147 pVCpu->iem.s.uRexReg = 0;
1148 pVCpu->iem.s.uRexB = 0;
1149 pVCpu->iem.s.uRexIndex = 0;
1150 pVCpu->iem.s.idxPrefix = 0;
1151 pVCpu->iem.s.uVex3rdReg = 0;
1152 pVCpu->iem.s.uVexLength = 0;
1153 pVCpu->iem.s.fEvexStuff = 0;
1154 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1155#ifdef IEM_WITH_CODE_TLB
1156 pVCpu->iem.s.pbInstrBuf = NULL;
1157 pVCpu->iem.s.offInstrNextByte = 0;
1158 pVCpu->iem.s.offCurInstrStart = 0;
1159# ifdef VBOX_STRICT
1160 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1161 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1162 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1163# endif
1164#else
1165 pVCpu->iem.s.offOpcode = 0;
1166 pVCpu->iem.s.cbOpcode = 0;
1167#endif
1168 pVCpu->iem.s.cActiveMappings = 0;
1169 pVCpu->iem.s.iNextMapping = 0;
1170 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1171 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1172#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1173 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1174 && pCtx->cs.u64Base == 0
1175 && pCtx->cs.u32Limit == UINT32_MAX
1176 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1177 if (!pVCpu->iem.s.fInPatchCode)
1178 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1179#endif
1180
1181#ifdef DBGFTRACE_ENABLED
1182 switch (enmMode)
1183 {
1184 case IEMMODE_64BIT:
1185 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1186 break;
1187 case IEMMODE_32BIT:
1188 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1189 break;
1190 case IEMMODE_16BIT:
1191 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1192 break;
1193 }
1194#endif
1195}
1196
1197
1198/**
1199 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
1200 *
1201 * This is mostly a copy of iemInitDecoder.
1202 *
1203 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1204 */
1205DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1206{
1207 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1208
1209 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1210
1211#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1212 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1213 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1214 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1215 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1216 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1217 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1218 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1219 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1220#endif
1221
1222 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1223#ifdef IEM_VERIFICATION_MODE_FULL
1224 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1225 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1226#endif
1227 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1228 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1229 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1230 pVCpu->iem.s.enmEffAddrMode = enmMode;
1231 if (enmMode != IEMMODE_64BIT)
1232 {
1233 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1234 pVCpu->iem.s.enmEffOpSize = enmMode;
1235 }
1236 else
1237 {
1238 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1239 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1240 }
1241 pVCpu->iem.s.fPrefixes = 0;
1242 pVCpu->iem.s.uRexReg = 0;
1243 pVCpu->iem.s.uRexB = 0;
1244 pVCpu->iem.s.uRexIndex = 0;
1245 pVCpu->iem.s.idxPrefix = 0;
1246 pVCpu->iem.s.uVex3rdReg = 0;
1247 pVCpu->iem.s.uVexLength = 0;
1248 pVCpu->iem.s.fEvexStuff = 0;
1249 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1250#ifdef IEM_WITH_CODE_TLB
1251 if (pVCpu->iem.s.pbInstrBuf)
1252 {
1253 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1254 - pVCpu->iem.s.uInstrBufPc;
1255 if (off < pVCpu->iem.s.cbInstrBufTotal)
1256 {
1257 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1258 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1259 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1260 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1261 else
1262 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1263 }
1264 else
1265 {
1266 pVCpu->iem.s.pbInstrBuf = NULL;
1267 pVCpu->iem.s.offInstrNextByte = 0;
1268 pVCpu->iem.s.offCurInstrStart = 0;
1269 pVCpu->iem.s.cbInstrBuf = 0;
1270 pVCpu->iem.s.cbInstrBufTotal = 0;
1271 }
1272 }
1273 else
1274 {
1275 pVCpu->iem.s.offInstrNextByte = 0;
1276 pVCpu->iem.s.offCurInstrStart = 0;
1277 pVCpu->iem.s.cbInstrBuf = 0;
1278 pVCpu->iem.s.cbInstrBufTotal = 0;
1279 }
1280#else
1281 pVCpu->iem.s.cbOpcode = 0;
1282 pVCpu->iem.s.offOpcode = 0;
1283#endif
1284 Assert(pVCpu->iem.s.cActiveMappings == 0);
1285 pVCpu->iem.s.iNextMapping = 0;
1286 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1287 Assert(pVCpu->iem.s.fBypassHandlers == false);
1288#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1289 if (!pVCpu->iem.s.fInPatchCode)
1290 { /* likely */ }
1291 else
1292 {
1293 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1294 && pCtx->cs.u64Base == 0
1295 && pCtx->cs.u32Limit == UINT32_MAX
1296 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1297 if (!pVCpu->iem.s.fInPatchCode)
1298 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1299 }
1300#endif
1301
1302#ifdef DBGFTRACE_ENABLED
1303 switch (enmMode)
1304 {
1305 case IEMMODE_64BIT:
1306 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1307 break;
1308 case IEMMODE_32BIT:
1309 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1310 break;
1311 case IEMMODE_16BIT:
1312 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1313 break;
1314 }
1315#endif
1316}
1317
1318
1319
1320/**
1321 * Prefetches opcodes when starting execution for the first time.
1322 *
1323 * @returns Strict VBox status code.
1324 * @param pVCpu The cross context virtual CPU structure of the
1325 * calling thread.
1326 * @param fBypassHandlers Whether to bypass access handlers.
1327 */
1328IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1329{
1330#ifdef IEM_VERIFICATION_MODE_FULL
1331 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1332#endif
1333 iemInitDecoder(pVCpu, fBypassHandlers);
1334
1335#ifdef IEM_WITH_CODE_TLB
1336 /** @todo Do ITLB lookup here. */
1337
1338#else /* !IEM_WITH_CODE_TLB */
1339
1340 /*
1341 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1342 *
1343 * First translate CS:rIP to a physical address.
1344 */
1345 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1346 uint32_t cbToTryRead;
1347 RTGCPTR GCPtrPC;
1348 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1349 {
1350 cbToTryRead = PAGE_SIZE;
1351 GCPtrPC = pCtx->rip;
1352 if (IEM_IS_CANONICAL(GCPtrPC))
1353 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1354 else
1355 return iemRaiseGeneralProtectionFault0(pVCpu);
1356 }
1357 else
1358 {
1359 uint32_t GCPtrPC32 = pCtx->eip;
1360 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1361 if (GCPtrPC32 <= pCtx->cs.u32Limit)
1362 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1363 else
1364 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1365 if (cbToTryRead) { /* likely */ }
1366 else /* overflowed */
1367 {
1368 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1369 cbToTryRead = UINT32_MAX;
1370 }
1371 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1372 Assert(GCPtrPC <= UINT32_MAX);
1373 }
1374
1375# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1376 /* Allow interpretation of patch manager code blocks since they can for
1377 instance throw #PFs for perfectly good reasons. */
1378 if (pVCpu->iem.s.fInPatchCode)
1379 {
1380 size_t cbRead = 0;
1381 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1382 AssertRCReturn(rc, rc);
1383 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1384 return VINF_SUCCESS;
1385 }
1386# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1387
1388 RTGCPHYS GCPhys;
1389 uint64_t fFlags;
1390 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1391 if (RT_SUCCESS(rc)) { /* probable */ }
1392 else
1393 {
1394 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1395 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1396 }
1397 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1398 else
1399 {
1400 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1401 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1402 }
1403 if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1404 else
1405 {
1406 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1407 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1408 }
1409 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1410 /** @todo Check reserved bits and such stuff. PGM is better at doing
1411 * that, so do it when implementing the guest virtual address
1412 * TLB... */
1413
1414# ifdef IEM_VERIFICATION_MODE_FULL
1415 /*
1416 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1417 * instruction.
1418 */
1419 /** @todo optimize this differently by not using PGMPhysRead. */
1420 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1421 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1422 if ( offPrevOpcodes < cbOldOpcodes
1423 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1424 {
1425 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1426 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1427 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1428 pVCpu->iem.s.cbOpcode = cbNew;
1429 return VINF_SUCCESS;
1430 }
1431# endif
1432
1433 /*
1434 * Read the bytes at this address.
1435 */
1436 PVM pVM = pVCpu->CTX_SUFF(pVM);
1437# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1438 size_t cbActual;
1439 if ( PATMIsEnabled(pVM)
1440 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1441 {
1442 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1443 Assert(cbActual > 0);
1444 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1445 }
1446 else
1447# endif
1448 {
1449 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1450 if (cbToTryRead > cbLeftOnPage)
1451 cbToTryRead = cbLeftOnPage;
1452 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1453 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1454
1455 if (!pVCpu->iem.s.fBypassHandlers)
1456 {
1457 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1458 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1459 { /* likely */ }
1460 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1461 {
1462 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1463 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1464 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1465 }
1466 else
1467 {
1468 Log((RT_SUCCESS(rcStrict)
1469 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1470 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1471 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1472 return rcStrict;
1473 }
1474 }
1475 else
1476 {
1477 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1478 if (RT_SUCCESS(rc))
1479 { /* likely */ }
1480 else
1481 {
1482 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1483 GCPtrPC, GCPhys, rc, cbToTryRead));
1484 return rc;
1485 }
1486 }
1487 pVCpu->iem.s.cbOpcode = cbToTryRead;
1488 }
1489#endif /* !IEM_WITH_CODE_TLB */
1490 return VINF_SUCCESS;
1491}
1492
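/*
 * Illustrative sketch of the page-permission checks performed above when
 * prefetching opcode bytes: an instruction fetch faults on a supervisor page
 * when executing at CPL 3, and on a no-execute page whenever EFER.NXE is set.
 * iemToyCanFetchFromPte is a hypothetical helper written for illustration
 * only; it is not part of IEM.
 */
#if 0 /* example only, never compiled */
static bool iemToyCanFetchFromPte(uint64_t fPteFlags, uint8_t uCpl, bool fNxe)
{
    if (!(fPteFlags & X86_PTE_US) && uCpl == 3)
        return false;   /* supervisor page accessed from user mode -> #PF */
    if ((fPteFlags & X86_PTE_PAE_NX) && fNxe)
        return false;   /* no-execute page with EFER.NXE set -> #PF */
    return true;
}
#endif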
1493
1494/**
1495 * Invalidates the IEM TLBs.
1496 *
1497 * This is called internally as well as by PGM when moving GC mappings.
1498 *
1500 * @param pVCpu The cross context virtual CPU structure of the calling
1501 * thread.
1502 * @param fVmm Set when PGM calls us with a remapping.
1503 */
1504VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1505{
1506#ifdef IEM_WITH_CODE_TLB
1507 pVCpu->iem.s.cbInstrBufTotal = 0;
1508 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1509 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1510 { /* very likely */ }
1511 else
1512 {
1513 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1514 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1515 while (i-- > 0)
1516 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1517 }
1518#endif
1519
1520#ifdef IEM_WITH_DATA_TLB
1521 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1522 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1523 { /* very likely */ }
1524 else
1525 {
1526 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1527 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1528 while (i-- > 0)
1529 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1530 }
1531#endif
1532 NOREF(pVCpu); NOREF(fVmm);
1533}
1534
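/*
 * Illustrative sketch of the lazy invalidation used above: every TLB entry's
 * uTag has the revision that was current when it was filled OR'ed in, so
 * bumping the revision makes all 256 tags stale without touching them; only
 * on the rare wrap back to zero do the tags get scrubbed.  The toy types and
 * the TOY_REV_INCR value below are invented for illustration and do not match
 * the real IEMTLB layout.
 */
#if 0 /* example only, never compiled */
#include <stdint.h>

typedef struct TOYTLBENTRY { uint64_t uTag; } TOYTLBENTRY;
typedef struct TOYTLB
{
    uint64_t    uTlbRevision;   /* non-zero once initialized; lives above the tag bits */
    TOYTLBENTRY aEntries[256];
} TOYTLB;
#define TOY_REV_INCR UINT64_C(0x1000000000) /* any increment above the page-number bits */

/* Filling a slot stamps it with the current revision:
      pTlb->aEntries[(uint8_t)uPageNo].uTag = uPageNo | pTlb->uTlbRevision;
   A lookup only hits when the stored tag equals (uPageNo | current revision),
   so advancing the revision invalidates everything at once: */
static void toyTlbInvalidateAll(TOYTLB *pTlb)
{
    pTlb->uTlbRevision += TOY_REV_INCR;
    if (pTlb->uTlbRevision == 0)            /* rare 64-bit wrap: scrub and restart */
    {
        pTlb->uTlbRevision = TOY_REV_INCR;
        for (unsigned i = 0; i < 256; i++)
            pTlb->aEntries[i].uTag = 0;
    }
}
#endif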
1535
1536/**
1537 * Invalidates a page in the TLBs.
1538 *
1539 * @param pVCpu The cross context virtual CPU structure of the calling
1540 * thread.
1541 * @param   GCPtr       The address of the page to invalidate.
1542 */
1543VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1544{
1545#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1546 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1547 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1548 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1549 uintptr_t idx = (uint8_t)GCPtr;
1550
1551# ifdef IEM_WITH_CODE_TLB
1552 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1553 {
1554 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1555 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1556 pVCpu->iem.s.cbInstrBufTotal = 0;
1557 }
1558# endif
1559
1560# ifdef IEM_WITH_DATA_TLB
1561 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1562 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1563# endif
1564#else
1565 NOREF(pVCpu); NOREF(GCPtr);
1566#endif
1567}
1568
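/*
 * Illustrative sketch of the index/tag arithmetic used by the lookup above:
 * the TLBs are direct-mapped with 256 entries, the low 8 bits of the page
 * number pick the slot and the whole page number (OR'ed with the current
 * revision) forms the tag.  iemToyTlbIndexAndTag is a hypothetical helper for
 * illustration only.
 */
#if 0 /* example only, never compiled */
static void iemToyTlbIndexAndTag(RTGCPTR GCPtr, uint64_t uTlbRevision,
                                 uintptr_t *pidx, uint64_t *puTag)
{
    uint64_t const uPageNo = GCPtr >> X86_PAGE_SHIFT;   /* strip the page offset */
    *pidx  = (uint8_t)uPageNo;                          /* low 8 bits -> slot 0..255 */
    *puTag = uPageNo | uTlbRevision;                    /* tag includes the revision */
}
#endif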
1569
1570/**
1571 * Invalidates the host physical aspects of the IEM TLBs.
1572 *
1573 * This is called internally as well as by PGM when moving GC mappings.
1574 *
1575 * @param pVCpu The cross context virtual CPU structure of the calling
1576 * thread.
1577 */
1578VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1579{
1580#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1581    /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1582
1583# ifdef IEM_WITH_CODE_TLB
1584 pVCpu->iem.s.cbInstrBufTotal = 0;
1585# endif
1586 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1587 if (uTlbPhysRev != 0)
1588 {
1589 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1590 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1591 }
1592 else
1593 {
1594 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1595 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1596
1597 unsigned i;
1598# ifdef IEM_WITH_CODE_TLB
1599 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1600 while (i-- > 0)
1601 {
1602 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1603 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1604 }
1605# endif
1606# ifdef IEM_WITH_DATA_TLB
1607 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1608 while (i-- > 0)
1609 {
1610 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1611 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1612 }
1613# endif
1614 }
1615#else
1616 NOREF(pVCpu);
1617#endif
1618}
1619
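/*
 * Illustrative sketch of why bumping uTlbPhysRev works: the physical revision
 * shares the fFlagsAndPhysRev field with a handful of flag bits, so a single
 * mask-and-compare both checks that the physical info is current and that
 * none of the unwanted flags are set (see the pbMappingR3 fast path further
 * down).  The TOY_* names below are invented for illustration.
 */
#if 0 /* example only, never compiled */
#include <stdint.h>
#include <stdbool.h>

#define TOY_F_NO_READ UINT64_C(0x0001)
#define TOY_F_NO_MAP  UINT64_C(0x0002)
#define TOY_PHYS_REV  UINT64_C(0xfffffffffffff000)  /* revision occupies the high bits */

/* uCurPhysRev only ever has bits within TOY_PHYS_REV set, so the compare
   below succeeds exactly when the revision matches and both flags are clear. */
static bool toyEntryDirectlyReadable(uint64_t fFlagsAndPhysRev, uint64_t uCurPhysRev)
{
    return (fFlagsAndPhysRev & (TOY_PHYS_REV | TOY_F_NO_MAP | TOY_F_NO_READ)) == uCurPhysRev;
}
#endif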
1620
1621/**
1622 * Invalidates the host physical aspects of the IEM TLBs on all virtual CPUs.
1623 *
1624 * This is called internally as well as by PGM when moving GC mappings.
1625 *
1626 * @param pVM The cross context VM structure.
1627 *
1628 * @remarks Caller holds the PGM lock.
1629 */
1630VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1631{
1632 RT_NOREF_PV(pVM);
1633}
1634
1635#ifdef IEM_WITH_CODE_TLB
1636
1637/**
1638 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception
1639 * (longjmp) on failure.
1640 *
1641 * We end up here for a number of reasons:
1642 * - pbInstrBuf isn't yet initialized.
1643 *      - Advancing beyond the buffer boundary (e.g. cross page).
1644 * - Advancing beyond the CS segment limit.
1645 * - Fetching from non-mappable page (e.g. MMIO).
1646 *
1647 * @param pVCpu The cross context virtual CPU structure of the
1648 * calling thread.
1649 * @param pvDst Where to return the bytes.
1650 * @param cbDst Number of bytes to read.
1651 *
1652 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1653 */
1654IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1655{
1656#ifdef IN_RING3
1657//__debugbreak();
1658 for (;;)
1659 {
1660 Assert(cbDst <= 8);
1661 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1662
1663 /*
1664 * We might have a partial buffer match, deal with that first to make the
1665 * rest simpler. This is the first part of the cross page/buffer case.
1666 */
1667 if (pVCpu->iem.s.pbInstrBuf != NULL)
1668 {
1669 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1670 {
1671 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1672 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1673 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1674
1675 cbDst -= cbCopy;
1676 pvDst = (uint8_t *)pvDst + cbCopy;
1677 offBuf += cbCopy;
1678                pVCpu->iem.s.offInstrNextByte += cbCopy;
1679 }
1680 }
1681
1682 /*
1683 * Check segment limit, figuring how much we're allowed to access at this point.
1684 *
1685 * We will fault immediately if RIP is past the segment limit / in non-canonical
1686 * territory. If we do continue, there are one or more bytes to read before we
1687 * end up in trouble and we need to do that first before faulting.
1688 */
1689 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1690 RTGCPTR GCPtrFirst;
1691 uint32_t cbMaxRead;
1692 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1693 {
1694 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1695 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1696 { /* likely */ }
1697 else
1698 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1699 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1700 }
1701 else
1702 {
1703 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1704 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1705 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1706 { /* likely */ }
1707 else
1708 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1709 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1710 if (cbMaxRead != 0)
1711 { /* likely */ }
1712 else
1713 {
1714 /* Overflowed because address is 0 and limit is max. */
1715 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1716 cbMaxRead = X86_PAGE_SIZE;
1717 }
1718 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1719 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1720 if (cbMaxRead2 < cbMaxRead)
1721 cbMaxRead = cbMaxRead2;
1722 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1723 }
1724
1725 /*
1726 * Get the TLB entry for this piece of code.
1727 */
1728 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1729 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1730 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1731 if (pTlbe->uTag == uTag)
1732 {
1733 /* likely when executing lots of code, otherwise unlikely */
1734# ifdef VBOX_WITH_STATISTICS
1735 pVCpu->iem.s.CodeTlb.cTlbHits++;
1736# endif
1737 }
1738 else
1739 {
1740 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1741# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1742 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1743 {
1744 pTlbe->uTag = uTag;
1745                pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1746                                        | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1747 pTlbe->GCPhys = NIL_RTGCPHYS;
1748 pTlbe->pbMappingR3 = NULL;
1749 }
1750 else
1751# endif
1752 {
1753 RTGCPHYS GCPhys;
1754 uint64_t fFlags;
1755 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1756 if (RT_FAILURE(rc))
1757 {
1758 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1759 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1760 }
1761
1762 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1763 pTlbe->uTag = uTag;
1764 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1765 pTlbe->GCPhys = GCPhys;
1766 pTlbe->pbMappingR3 = NULL;
1767 }
1768 }
1769
1770 /*
1771 * Check TLB page table level access flags.
1772 */
1773 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1774 {
1775 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1776 {
1777 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1778 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1779 }
1780 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1781 {
1782 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1783 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1784 }
1785 }
1786
1787# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1788 /*
1789 * Allow interpretation of patch manager code blocks since they can for
1790 * instance throw #PFs for perfectly good reasons.
1791 */
1792 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1793 { /* no unlikely */ }
1794 else
1795 {
1796            /** @todo Could optimize this a little in ring-3 if we liked. */
1797 size_t cbRead = 0;
1798 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1799 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1800 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1801 return;
1802 }
1803# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1804
1805 /*
1806 * Look up the physical page info if necessary.
1807 */
1808 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1809 { /* not necessary */ }
1810 else
1811 {
1812 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1813 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1814 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1815 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1816 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1817 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1818 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1819 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1820 }
1821
1822# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1823 /*
1824 * Try do a direct read using the pbMappingR3 pointer.
1825 */
1826 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1827 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1828 {
1829 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1830 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1831 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1832 {
1833 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1834 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1835 }
1836 else
1837 {
1838 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1839 Assert(cbInstr < cbMaxRead);
1840 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1841 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1842 }
1843 if (cbDst <= cbMaxRead)
1844 {
1845 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1846 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1847 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1848 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1849 return;
1850 }
1851 pVCpu->iem.s.pbInstrBuf = NULL;
1852
1853 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1854 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1855 }
1856 else
1857# endif
1858#if 0
1859 /*
1860     * If there is no special read handling, we can read a bit more and
1861 * put it in the prefetch buffer.
1862 */
1863 if ( cbDst < cbMaxRead
1864 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1865 {
1866 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1867 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1868 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1869 { /* likely */ }
1870 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1871 {
1872 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1873 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1874 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1875            AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1876 }
1877 else
1878 {
1879 Log((RT_SUCCESS(rcStrict)
1880 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1881 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1882 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1883 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1884 }
1885 }
1886 /*
1887 * Special read handling, so only read exactly what's needed.
1888 * This is a highly unlikely scenario.
1889 */
1890 else
1891#endif
1892 {
1893 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1894 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1895 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1896 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1897 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1898 { /* likely */ }
1899 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1900 {
1901 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1902 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1903 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1904 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1905 }
1906 else
1907 {
1908 Log((RT_SUCCESS(rcStrict)
1909 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1910 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1911 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1912 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1913 }
1914 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1915 if (cbToRead == cbDst)
1916 return;
1917 }
1918
1919 /*
1920 * More to read, loop.
1921 */
1922 cbDst -= cbMaxRead;
1923 pvDst = (uint8_t *)pvDst + cbMaxRead;
1924 }
1925#else
1926 RT_NOREF(pvDst, cbDst);
1927 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1928#endif
1929}
1930
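/*
 * Illustrative sketch of the read-window clamping performed above for
 * legacy/compat modes: the number of opcode bytes that may be fetched in one
 * go is limited both by CS.limit and by the end of the current guest page.
 * iemToyMaxInstrRead is a hypothetical helper; it assumes uEip <= cbCsLimit
 * (the real code raises the appropriate fault before getting here) and leaves
 * out the 64-bit canonical checks.
 */
#if 0 /* example only, never compiled */
static uint32_t iemToyMaxInstrRead(uint32_t uEip, uint32_t cbCsLimit, uint64_t uCsBase)
{
    /* Bytes left below the inclusive CS limit; 0 can only happen for
       uEip == 0 with a 4 GiB limit, i.e. the wrap-around case. */
    uint32_t cbMaxRead = cbCsLimit - uEip + 1;
    if (!cbMaxRead)
        cbMaxRead = X86_PAGE_SIZE;

    /* Clamp to the end of the current page of the flat (linear) address. */
    uint32_t const uFlat    = (uint32_t)(uEip + uCsBase);
    uint32_t const cbOnPage = X86_PAGE_SIZE - (uFlat & X86_PAGE_OFFSET_MASK);
    return RT_MIN(cbMaxRead, cbOnPage);
}
#endif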
1931#else
1932
1933/**
1934 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1935 * exception if it fails.
1936 *
1937 * @returns Strict VBox status code.
1938 * @param pVCpu The cross context virtual CPU structure of the
1939 * calling thread.
1940 * @param   cbMin               The minimum number of bytes relative to offOpcode
1941 *                              that must be read.
1942 */
1943IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1944{
1945 /*
1946 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1947 *
1948 * First translate CS:rIP to a physical address.
1949 */
1950 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1951 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1952 uint32_t cbToTryRead;
1953 RTGCPTR GCPtrNext;
1954 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1955 {
1956 cbToTryRead = PAGE_SIZE;
1957 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1958 if (!IEM_IS_CANONICAL(GCPtrNext))
1959 return iemRaiseGeneralProtectionFault0(pVCpu);
1960 }
1961 else
1962 {
1963 uint32_t GCPtrNext32 = pCtx->eip;
1964 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1965 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1966 if (GCPtrNext32 > pCtx->cs.u32Limit)
1967 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1968 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1969 if (!cbToTryRead) /* overflowed */
1970 {
1971 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1972 cbToTryRead = UINT32_MAX;
1973 /** @todo check out wrapping around the code segment. */
1974 }
1975 if (cbToTryRead < cbMin - cbLeft)
1976 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1977 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1978 }
1979
1980 /* Only read up to the end of the page, and make sure we don't read more
1981 than the opcode buffer can hold. */
1982 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1983 if (cbToTryRead > cbLeftOnPage)
1984 cbToTryRead = cbLeftOnPage;
1985 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1986 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1987/** @todo r=bird: Convert assertion into undefined opcode exception? */
1988 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1989
1990# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1991 /* Allow interpretation of patch manager code blocks since they can for
1992 instance throw #PFs for perfectly good reasons. */
1993 if (pVCpu->iem.s.fInPatchCode)
1994 {
1995 size_t cbRead = 0;
1996 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
1997 AssertRCReturn(rc, rc);
1998 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1999 return VINF_SUCCESS;
2000 }
2001# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2002
2003 RTGCPHYS GCPhys;
2004 uint64_t fFlags;
2005 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2006 if (RT_FAILURE(rc))
2007 {
2008 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2009 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2010 }
2011 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2012 {
2013 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2014 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2015 }
2016 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
2017 {
2018 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2019 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2020 }
2021 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2022 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2023 /** @todo Check reserved bits and such stuff. PGM is better at doing
2024 * that, so do it when implementing the guest virtual address
2025 * TLB... */
2026
2027 /*
2028 * Read the bytes at this address.
2029 *
2030 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2031 * and since PATM should only patch the start of an instruction there
2032 * should be no need to check again here.
2033 */
2034 if (!pVCpu->iem.s.fBypassHandlers)
2035 {
2036 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2037 cbToTryRead, PGMACCESSORIGIN_IEM);
2038 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2039 { /* likely */ }
2040 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2041 {
2042 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2043 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
2044 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2045 }
2046 else
2047 {
2048 Log((RT_SUCCESS(rcStrict)
2049 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2050 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2051 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
2052 return rcStrict;
2053 }
2054 }
2055 else
2056 {
2057 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2058 if (RT_SUCCESS(rc))
2059 { /* likely */ }
2060 else
2061 {
2062 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2063 return rc;
2064 }
2065 }
2066 pVCpu->iem.s.cbOpcode += cbToTryRead;
2067 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2068
2069 return VINF_SUCCESS;
2070}
2071
2072#endif /* !IEM_WITH_CODE_TLB */
2073#ifndef IEM_WITH_SETJMP
2074
2075/**
2076 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2077 *
2078 * @returns Strict VBox status code.
2079 * @param pVCpu The cross context virtual CPU structure of the
2080 * calling thread.
2081 * @param pb Where to return the opcode byte.
2082 */
2083DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2084{
2085 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2086 if (rcStrict == VINF_SUCCESS)
2087 {
2088 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2089 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2090 pVCpu->iem.s.offOpcode = offOpcode + 1;
2091 }
2092 else
2093 *pb = 0;
2094 return rcStrict;
2095}
2096
2097
2098/**
2099 * Fetches the next opcode byte.
2100 *
2101 * @returns Strict VBox status code.
2102 * @param pVCpu The cross context virtual CPU structure of the
2103 * calling thread.
2104 * @param pu8 Where to return the opcode byte.
2105 */
2106DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2107{
2108 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2109 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2110 {
2111 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2112 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2113 return VINF_SUCCESS;
2114 }
2115 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2116}
2117
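/*
 * Illustrative sketch of the pattern used by all the opcode getters: a tiny
 * DECLINLINE fast path that only checks whether the data is already buffered,
 * and a DECL_NO_INLINE "Slow" helper that does the refill, keeping the cold
 * code out of the inlined hot path.  TOYSTREAM and the toy* functions are
 * invented for illustration; VERR_EOF merely stands in for a refill failure.
 */
#if 0 /* example only, never compiled */
typedef struct TOYSTREAM
{
    uint8_t abBuf[16];
    uint8_t offNext;
    uint8_t cbValid;
} TOYSTREAM;

DECL_NO_INLINE(static, int) toyGetU8Slow(TOYSTREAM *pStream, uint8_t *pb)
{
    /* ... refill pStream->abBuf here, which may fail ... */
    *pb = 0;
    return VERR_EOF;
}

DECLINLINE(int) toyGetU8(TOYSTREAM *pStream, uint8_t *pb)
{
    uint8_t const offNext = pStream->offNext;
    if (RT_LIKELY(offNext < pStream->cbValid))  /* hot path: byte already buffered */
    {
        pStream->offNext = offNext + 1;
        *pb = pStream->abBuf[offNext];
        return VINF_SUCCESS;
    }
    return toyGetU8Slow(pStream, pb);           /* cold path: out of line */
}
#endif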
2118#else /* IEM_WITH_SETJMP */
2119
2120/**
2121 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2122 *
2123 * @returns The opcode byte.
2124 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2125 */
2126DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2127{
2128# ifdef IEM_WITH_CODE_TLB
2129 uint8_t u8;
2130 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2131 return u8;
2132# else
2133 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2134 if (rcStrict == VINF_SUCCESS)
2135 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2136 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2137# endif
2138}
2139
2140
2141/**
2142 * Fetches the next opcode byte, longjmp on error.
2143 *
2144 * @returns The opcode byte.
2145 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2146 */
2147DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2148{
2149# ifdef IEM_WITH_CODE_TLB
2150 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2151 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2152 if (RT_LIKELY( pbBuf != NULL
2153 && offBuf < pVCpu->iem.s.cbInstrBuf))
2154 {
2155 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2156 return pbBuf[offBuf];
2157 }
2158# else
2159 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2160 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2161 {
2162 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2163 return pVCpu->iem.s.abOpcode[offOpcode];
2164 }
2165# endif
2166 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2167}
2168
2169#endif /* IEM_WITH_SETJMP */
2170
2171/**
2172 * Fetches the next opcode byte, returns automatically on failure.
2173 *
2174 * @param a_pu8 Where to return the opcode byte.
2175 * @remark Implicitly references pVCpu.
2176 */
2177#ifndef IEM_WITH_SETJMP
2178# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2179 do \
2180 { \
2181 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2182 if (rcStrict2 == VINF_SUCCESS) \
2183 { /* likely */ } \
2184 else \
2185 return rcStrict2; \
2186 } while (0)
2187#else
2188# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2189#endif /* IEM_WITH_SETJMP */
2190
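/*
 * Illustrative sketch of how the IEM_OPCODE_GET_NEXT_* macros are meant to be
 * used: the non-setjmp variant hides a 'return rcStrict2;' and the setjmp
 * variant longjmps, so the caller must be a function returning VBOXSTRICTRC
 * and needs no explicit error handling at the call site.
 * iemToyDecodeModRmByte is a hypothetical decoder snippet, not a real opcode
 * handler.
 */
#if 0 /* example only, never compiled */
IEM_STATIC VBOXSTRICTRC iemToyDecodeModRmByte(PVMCPU pVCpu)
{
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm);   /* returns/longjmps on fetch failure */
    Log5(("toy decode: ModR/M byte %#x\n", bRm));
    return VINF_SUCCESS;
}
#endif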
2191
2192#ifndef IEM_WITH_SETJMP
2193/**
2194 * Fetches the next signed byte from the opcode stream.
2195 *
2196 * @returns Strict VBox status code.
2197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2198 * @param pi8 Where to return the signed byte.
2199 */
2200DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2201{
2202 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2203}
2204#endif /* !IEM_WITH_SETJMP */
2205
2206
2207/**
2208 * Fetches the next signed byte from the opcode stream, returning automatically
2209 * on failure.
2210 *
2211 * @param a_pi8 Where to return the signed byte.
2212 * @remark Implicitly references pVCpu.
2213 */
2214#ifndef IEM_WITH_SETJMP
2215# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2216 do \
2217 { \
2218 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2219 if (rcStrict2 != VINF_SUCCESS) \
2220 return rcStrict2; \
2221 } while (0)
2222#else /* IEM_WITH_SETJMP */
2223# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2224
2225#endif /* IEM_WITH_SETJMP */
2226
2227#ifndef IEM_WITH_SETJMP
2228
2229/**
2230 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2231 *
2232 * @returns Strict VBox status code.
2233 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2234 * @param   pu16                Where to return the opcode word.
2235 */
2236DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2237{
2238 uint8_t u8;
2239 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2240 if (rcStrict == VINF_SUCCESS)
2241 *pu16 = (int8_t)u8;
2242 return rcStrict;
2243}
2244
2245
2246/**
2247 * Fetches the next signed byte from the opcode stream, extending it to
2248 * unsigned 16-bit.
2249 *
2250 * @returns Strict VBox status code.
2251 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2252 * @param pu16 Where to return the unsigned word.
2253 */
2254DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2255{
2256 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2257 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2258 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2259
2260 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2261 pVCpu->iem.s.offOpcode = offOpcode + 1;
2262 return VINF_SUCCESS;
2263}
2264
2265#endif /* !IEM_WITH_SETJMP */
2266
2267/**
2268 * Fetches the next signed byte from the opcode stream, sign-extending it to a
2269 * word and returning automatically on failure.
2270 *
2271 * @param a_pu16 Where to return the word.
2272 * @remark Implicitly references pVCpu.
2273 */
2274#ifndef IEM_WITH_SETJMP
2275# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2276 do \
2277 { \
2278 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2279 if (rcStrict2 != VINF_SUCCESS) \
2280 return rcStrict2; \
2281 } while (0)
2282#else
2283# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2284#endif
2285
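/*
 * Illustrative sketch of the sign-extension idiom shared by all the S8Sx*
 * helpers: casting the opcode byte to int8_t and storing it in a wider
 * unsigned variable reproduces the sign bits in the upper part, which is
 * exactly what disp8/imm8 operands need.  toySignExtendU8ToU16 is a
 * self-contained example, not IEM code.
 */
#if 0 /* example only, never compiled */
#include <stdint.h>

static uint16_t toySignExtendU8ToU16(uint8_t b)
{
    uint16_t u16 = (int8_t)b;   /* e.g. 0x80 -> -128 -> 0xFF80 */
    return u16;
}
#endif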
2286#ifndef IEM_WITH_SETJMP
2287
2288/**
2289 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2290 *
2291 * @returns Strict VBox status code.
2292 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2293 * @param pu32 Where to return the opcode dword.
2294 */
2295DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2296{
2297 uint8_t u8;
2298 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2299 if (rcStrict == VINF_SUCCESS)
2300 *pu32 = (int8_t)u8;
2301 return rcStrict;
2302}
2303
2304
2305/**
2306 * Fetches the next signed byte from the opcode stream, extending it to
2307 * unsigned 32-bit.
2308 *
2309 * @returns Strict VBox status code.
2310 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2311 * @param pu32 Where to return the unsigned dword.
2312 */
2313DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2314{
2315 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2316 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2317 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2318
2319 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2320 pVCpu->iem.s.offOpcode = offOpcode + 1;
2321 return VINF_SUCCESS;
2322}
2323
2324#endif /* !IEM_WITH_SETJMP */
2325
2326/**
2327 * Fetches the next signed byte from the opcode stream, sign-extending it to a
2328 * double word and returning automatically on failure.
2329 *
2330 * @param   a_pu32              Where to return the double word.
2331 * @remark Implicitly references pVCpu.
2332 */
2333#ifndef IEM_WITH_SETJMP
2334# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2335 do \
2336 { \
2337 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2338 if (rcStrict2 != VINF_SUCCESS) \
2339 return rcStrict2; \
2340 } while (0)
2341#else
2342# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2343#endif
2344
2345#ifndef IEM_WITH_SETJMP
2346
2347/**
2348 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2349 *
2350 * @returns Strict VBox status code.
2351 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2352 * @param pu64 Where to return the opcode qword.
2353 */
2354DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2355{
2356 uint8_t u8;
2357 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2358 if (rcStrict == VINF_SUCCESS)
2359 *pu64 = (int8_t)u8;
2360 return rcStrict;
2361}
2362
2363
2364/**
2365 * Fetches the next signed byte from the opcode stream, extending it to
2366 * unsigned 64-bit.
2367 *
2368 * @returns Strict VBox status code.
2369 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2370 * @param pu64 Where to return the unsigned qword.
2371 */
2372DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2373{
2374 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2375 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2376 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2377
2378 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2379 pVCpu->iem.s.offOpcode = offOpcode + 1;
2380 return VINF_SUCCESS;
2381}
2382
2383#endif /* !IEM_WITH_SETJMP */
2384
2385
2386/**
2387 * Fetches the next signed byte from the opcode stream, sign-extending it to a
2388 * quad word and returning automatically on failure.
2389 *
2390 * @param   a_pu64              Where to return the quad word.
2391 * @remark Implicitly references pVCpu.
2392 */
2393#ifndef IEM_WITH_SETJMP
2394# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2395 do \
2396 { \
2397 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2398 if (rcStrict2 != VINF_SUCCESS) \
2399 return rcStrict2; \
2400 } while (0)
2401#else
2402# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2403#endif
2404
2405
2406#ifndef IEM_WITH_SETJMP
2407
2408/**
2409 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2410 *
2411 * @returns Strict VBox status code.
2412 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2413 * @param pu16 Where to return the opcode word.
2414 */
2415DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2416{
2417 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2418 if (rcStrict == VINF_SUCCESS)
2419 {
2420 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2421# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2422 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2423# else
2424 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2425# endif
2426 pVCpu->iem.s.offOpcode = offOpcode + 2;
2427 }
2428 else
2429 *pu16 = 0;
2430 return rcStrict;
2431}
2432
2433
2434/**
2435 * Fetches the next opcode word.
2436 *
2437 * @returns Strict VBox status code.
2438 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2439 * @param pu16 Where to return the opcode word.
2440 */
2441DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2442{
2443 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2444 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2445 {
2446 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2447# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2448 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2449# else
2450 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2451# endif
2452 return VINF_SUCCESS;
2453 }
2454 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2455}
2456
2457#else /* IEM_WITH_SETJMP */
2458
2459/**
2460 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
2461 *
2462 * @returns The opcode word.
2463 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2464 */
2465DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2466{
2467# ifdef IEM_WITH_CODE_TLB
2468 uint16_t u16;
2469 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2470 return u16;
2471# else
2472 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2473 if (rcStrict == VINF_SUCCESS)
2474 {
2475 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2476 pVCpu->iem.s.offOpcode += 2;
2477# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2478 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2479# else
2480 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2481# endif
2482 }
2483 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2484# endif
2485}
2486
2487
2488/**
2489 * Fetches the next opcode word, longjmp on error.
2490 *
2491 * @returns The opcode word.
2492 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2493 */
2494DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2495{
2496# ifdef IEM_WITH_CODE_TLB
2497 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2498 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2499 if (RT_LIKELY( pbBuf != NULL
2500 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2501 {
2502 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2503# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2504 return *(uint16_t const *)&pbBuf[offBuf];
2505# else
2506 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2507# endif
2508 }
2509# else
2510 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2511 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2512 {
2513 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2514# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2515 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2516# else
2517 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2518# endif
2519 }
2520# endif
2521 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2522}
2523
2524#endif /* IEM_WITH_SETJMP */
2525
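/*
 * Illustrative note on the two code paths above: with
 * IEM_USE_UNALIGNED_DATA_ACCESS the opcode bytes are simply reinterpreted in
 * place (fine on little-endian hosts that tolerate unaligned loads, i.e.
 * x86/AMD64), while the portable path assembles the value byte by byte with
 * RT_MAKE_U16.  toyReadU16LittleEndian is an invented helper showing the
 * equivalent arithmetic.
 */
#if 0 /* example only, never compiled */
static uint16_t toyReadU16LittleEndian(const uint8_t *pb)
{
    return (uint16_t)(pb[0] | ((uint16_t)pb[1] << 8));  /* low byte first */
}
#endif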
2526
2527/**
2528 * Fetches the next opcode word, returns automatically on failure.
2529 *
2530 * @param a_pu16 Where to return the opcode word.
2531 * @remark Implicitly references pVCpu.
2532 */
2533#ifndef IEM_WITH_SETJMP
2534# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2535 do \
2536 { \
2537 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2538 if (rcStrict2 != VINF_SUCCESS) \
2539 return rcStrict2; \
2540 } while (0)
2541#else
2542# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2543#endif
2544
2545#ifndef IEM_WITH_SETJMP
2546
2547/**
2548 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2549 *
2550 * @returns Strict VBox status code.
2551 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2552 * @param pu32 Where to return the opcode double word.
2553 */
2554DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2555{
2556 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2557 if (rcStrict == VINF_SUCCESS)
2558 {
2559 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2560 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2561 pVCpu->iem.s.offOpcode = offOpcode + 2;
2562 }
2563 else
2564 *pu32 = 0;
2565 return rcStrict;
2566}
2567
2568
2569/**
2570 * Fetches the next opcode word, zero extending it to a double word.
2571 *
2572 * @returns Strict VBox status code.
2573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2574 * @param pu32 Where to return the opcode double word.
2575 */
2576DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2577{
2578 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2579 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2580 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2581
2582 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2583 pVCpu->iem.s.offOpcode = offOpcode + 2;
2584 return VINF_SUCCESS;
2585}
2586
2587#endif /* !IEM_WITH_SETJMP */
2588
2589
2590/**
2591 * Fetches the next opcode word and zero extends it to a double word, returns
2592 * automatically on failure.
2593 *
2594 * @param a_pu32 Where to return the opcode double word.
2595 * @remark Implicitly references pVCpu.
2596 */
2597#ifndef IEM_WITH_SETJMP
2598# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2599 do \
2600 { \
2601 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2602 if (rcStrict2 != VINF_SUCCESS) \
2603 return rcStrict2; \
2604 } while (0)
2605#else
2606# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2607#endif
2608
2609#ifndef IEM_WITH_SETJMP
2610
2611/**
2612 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2613 *
2614 * @returns Strict VBox status code.
2615 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2616 * @param pu64 Where to return the opcode quad word.
2617 */
2618DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2619{
2620 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2621 if (rcStrict == VINF_SUCCESS)
2622 {
2623 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2624 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2625 pVCpu->iem.s.offOpcode = offOpcode + 2;
2626 }
2627 else
2628 *pu64 = 0;
2629 return rcStrict;
2630}
2631
2632
2633/**
2634 * Fetches the next opcode word, zero extending it to a quad word.
2635 *
2636 * @returns Strict VBox status code.
2637 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2638 * @param pu64 Where to return the opcode quad word.
2639 */
2640DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2641{
2642 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2643 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2644 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2645
2646 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2647 pVCpu->iem.s.offOpcode = offOpcode + 2;
2648 return VINF_SUCCESS;
2649}
2650
2651#endif /* !IEM_WITH_SETJMP */
2652
2653/**
2654 * Fetches the next opcode word and zero extends it to a quad word, returns
2655 * automatically on failure.
2656 *
2657 * @param a_pu64 Where to return the opcode quad word.
2658 * @remark Implicitly references pVCpu.
2659 */
2660#ifndef IEM_WITH_SETJMP
2661# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2662 do \
2663 { \
2664 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2665 if (rcStrict2 != VINF_SUCCESS) \
2666 return rcStrict2; \
2667 } while (0)
2668#else
2669# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2670#endif
2671
2672
2673#ifndef IEM_WITH_SETJMP
2674/**
2675 * Fetches the next signed word from the opcode stream.
2676 *
2677 * @returns Strict VBox status code.
2678 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2679 * @param pi16 Where to return the signed word.
2680 */
2681DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2682{
2683 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2684}
2685#endif /* !IEM_WITH_SETJMP */
2686
2687
2688/**
2689 * Fetches the next signed word from the opcode stream, returning automatically
2690 * on failure.
2691 *
2692 * @param a_pi16 Where to return the signed word.
2693 * @remark Implicitly references pVCpu.
2694 */
2695#ifndef IEM_WITH_SETJMP
2696# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2697 do \
2698 { \
2699 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2700 if (rcStrict2 != VINF_SUCCESS) \
2701 return rcStrict2; \
2702 } while (0)
2703#else
2704# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2705#endif
2706
2707#ifndef IEM_WITH_SETJMP
2708
2709/**
2710 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2711 *
2712 * @returns Strict VBox status code.
2713 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2714 * @param pu32 Where to return the opcode dword.
2715 */
2716DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2717{
2718 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2719 if (rcStrict == VINF_SUCCESS)
2720 {
2721 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2722# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2723 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2724# else
2725 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2726 pVCpu->iem.s.abOpcode[offOpcode + 1],
2727 pVCpu->iem.s.abOpcode[offOpcode + 2],
2728 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2729# endif
2730 pVCpu->iem.s.offOpcode = offOpcode + 4;
2731 }
2732 else
2733 *pu32 = 0;
2734 return rcStrict;
2735}
2736
2737
2738/**
2739 * Fetches the next opcode dword.
2740 *
2741 * @returns Strict VBox status code.
2742 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2743 * @param pu32 Where to return the opcode double word.
2744 */
2745DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2746{
2747 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2748 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2749 {
2750 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2751# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2752 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2753# else
2754 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2755 pVCpu->iem.s.abOpcode[offOpcode + 1],
2756 pVCpu->iem.s.abOpcode[offOpcode + 2],
2757 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2758# endif
2759 return VINF_SUCCESS;
2760 }
2761 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2762}
2763
2764#else /* IEM_WITH_SETJMP */
2765
2766/**
2767 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2768 *
2769 * @returns The opcode dword.
2770 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2771 */
2772DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2773{
2774# ifdef IEM_WITH_CODE_TLB
2775 uint32_t u32;
2776 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2777 return u32;
2778# else
2779 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2780 if (rcStrict == VINF_SUCCESS)
2781 {
2782 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2783 pVCpu->iem.s.offOpcode = offOpcode + 4;
2784# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2785 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2786# else
2787 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2788 pVCpu->iem.s.abOpcode[offOpcode + 1],
2789 pVCpu->iem.s.abOpcode[offOpcode + 2],
2790 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2791# endif
2792 }
2793 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2794# endif
2795}
2796
2797
2798/**
2799 * Fetches the next opcode dword, longjmp on error.
2800 *
2801 * @returns The opcode dword.
2802 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2803 */
2804DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2805{
2806# ifdef IEM_WITH_CODE_TLB
2807 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2808 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2809 if (RT_LIKELY( pbBuf != NULL
2810 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2811 {
2812 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2813# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2814 return *(uint32_t const *)&pbBuf[offBuf];
2815# else
2816 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2817 pbBuf[offBuf + 1],
2818 pbBuf[offBuf + 2],
2819 pbBuf[offBuf + 3]);
2820# endif
2821 }
2822# else
2823 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2824 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2825 {
2826 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2827# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2828 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2829# else
2830 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2831 pVCpu->iem.s.abOpcode[offOpcode + 1],
2832 pVCpu->iem.s.abOpcode[offOpcode + 2],
2833 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2834# endif
2835 }
2836# endif
2837 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2838}
2839
2840#endif /* IEM_WITH_SETJMP */
2841
2842
2843/**
2844 * Fetches the next opcode dword, returns automatically on failure.
2845 *
2846 * @param a_pu32 Where to return the opcode dword.
2847 * @remark Implicitly references pVCpu.
2848 */
2849#ifndef IEM_WITH_SETJMP
2850# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2851 do \
2852 { \
2853 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2854 if (rcStrict2 != VINF_SUCCESS) \
2855 return rcStrict2; \
2856 } while (0)
2857#else
2858# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2859#endif
2860
2861#ifndef IEM_WITH_SETJMP
2862
2863/**
2864 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2865 *
2866 * @returns Strict VBox status code.
2867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2868 * @param   pu64                Where to return the opcode quad word.
2869 */
2870DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2871{
2872 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2873 if (rcStrict == VINF_SUCCESS)
2874 {
2875 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2876 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2877 pVCpu->iem.s.abOpcode[offOpcode + 1],
2878 pVCpu->iem.s.abOpcode[offOpcode + 2],
2879 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2880 pVCpu->iem.s.offOpcode = offOpcode + 4;
2881 }
2882 else
2883 *pu64 = 0;
2884 return rcStrict;
2885}
2886
2887
2888/**
2889 * Fetches the next opcode dword, zero extending it to a quad word.
2890 *
2891 * @returns Strict VBox status code.
2892 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2893 * @param pu64 Where to return the opcode quad word.
2894 */
2895DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2896{
2897 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2898 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2899 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2900
2901 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2902 pVCpu->iem.s.abOpcode[offOpcode + 1],
2903 pVCpu->iem.s.abOpcode[offOpcode + 2],
2904 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2905 pVCpu->iem.s.offOpcode = offOpcode + 4;
2906 return VINF_SUCCESS;
2907}
2908
2909#endif /* !IEM_WITH_SETJMP */
2910
2911
2912/**
2913 * Fetches the next opcode dword and zero extends it to a quad word, returns
2914 * automatically on failure.
2915 *
2916 * @param a_pu64 Where to return the opcode quad word.
2917 * @remark Implicitly references pVCpu.
2918 */
2919#ifndef IEM_WITH_SETJMP
2920# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2921 do \
2922 { \
2923 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2924 if (rcStrict2 != VINF_SUCCESS) \
2925 return rcStrict2; \
2926 } while (0)
2927#else
2928# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2929#endif
2930
2931
2932#ifndef IEM_WITH_SETJMP
2933/**
2934 * Fetches the next signed double word from the opcode stream.
2935 *
2936 * @returns Strict VBox status code.
2937 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2938 * @param pi32 Where to return the signed double word.
2939 */
2940DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2941{
2942 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2943}
2944#endif
2945
2946/**
2947 * Fetches the next signed double word from the opcode stream, returning
2948 * automatically on failure.
2949 *
2950 * @param a_pi32 Where to return the signed double word.
2951 * @remark Implicitly references pVCpu.
2952 */
2953#ifndef IEM_WITH_SETJMP
2954# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2955 do \
2956 { \
2957 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2958 if (rcStrict2 != VINF_SUCCESS) \
2959 return rcStrict2; \
2960 } while (0)
2961#else
2962# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2963#endif
2964
2965#ifndef IEM_WITH_SETJMP
2966
2967/**
2968 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2969 *
2970 * @returns Strict VBox status code.
2971 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2972 * @param pu64 Where to return the opcode qword.
2973 */
2974DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2975{
2976 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2977 if (rcStrict == VINF_SUCCESS)
2978 {
2979 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2980 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2981 pVCpu->iem.s.abOpcode[offOpcode + 1],
2982 pVCpu->iem.s.abOpcode[offOpcode + 2],
2983 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2984 pVCpu->iem.s.offOpcode = offOpcode + 4;
2985 }
2986 else
2987 *pu64 = 0;
2988 return rcStrict;
2989}
2990
2991
2992/**
2993 * Fetches the next opcode dword, sign extending it into a quad word.
2994 *
2995 * @returns Strict VBox status code.
2996 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2997 * @param pu64 Where to return the opcode quad word.
2998 */
2999DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3000{
3001 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3002 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3003 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3004
3005 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3006 pVCpu->iem.s.abOpcode[offOpcode + 1],
3007 pVCpu->iem.s.abOpcode[offOpcode + 2],
3008 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3009 *pu64 = i32;
3010 pVCpu->iem.s.offOpcode = offOpcode + 4;
3011 return VINF_SUCCESS;
3012}
3013
3014#endif /* !IEM_WITH_SETJMP */
3015
3016
3017/**
3018 * Fetches the next opcode double word and sign extends it to a quad word,
3019 * returns automatically on failure.
3020 *
3021 * @param a_pu64 Where to return the opcode quad word.
3022 * @remark Implicitly references pVCpu.
3023 */
3024#ifndef IEM_WITH_SETJMP
3025# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3026 do \
3027 { \
3028 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3029 if (rcStrict2 != VINF_SUCCESS) \
3030 return rcStrict2; \
3031 } while (0)
3032#else
3033# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3034#endif
3035
3036#ifndef IEM_WITH_SETJMP
3037
3038/**
3039 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3040 *
3041 * @returns Strict VBox status code.
3042 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3043 * @param pu64 Where to return the opcode qword.
3044 */
3045DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3046{
3047 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3048 if (rcStrict == VINF_SUCCESS)
3049 {
3050 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3051# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3052 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3053# else
3054 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3055 pVCpu->iem.s.abOpcode[offOpcode + 1],
3056 pVCpu->iem.s.abOpcode[offOpcode + 2],
3057 pVCpu->iem.s.abOpcode[offOpcode + 3],
3058 pVCpu->iem.s.abOpcode[offOpcode + 4],
3059 pVCpu->iem.s.abOpcode[offOpcode + 5],
3060 pVCpu->iem.s.abOpcode[offOpcode + 6],
3061 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3062# endif
3063 pVCpu->iem.s.offOpcode = offOpcode + 8;
3064 }
3065 else
3066 *pu64 = 0;
3067 return rcStrict;
3068}
3069
3070
3071/**
3072 * Fetches the next opcode qword.
3073 *
3074 * @returns Strict VBox status code.
3075 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3076 * @param pu64 Where to return the opcode qword.
3077 */
3078DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3079{
3080 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3081 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3082 {
3083# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3084 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3085# else
3086 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3087 pVCpu->iem.s.abOpcode[offOpcode + 1],
3088 pVCpu->iem.s.abOpcode[offOpcode + 2],
3089 pVCpu->iem.s.abOpcode[offOpcode + 3],
3090 pVCpu->iem.s.abOpcode[offOpcode + 4],
3091 pVCpu->iem.s.abOpcode[offOpcode + 5],
3092 pVCpu->iem.s.abOpcode[offOpcode + 6],
3093 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3094# endif
3095 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3096 return VINF_SUCCESS;
3097 }
3098 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3099}
3100
3101#else /* IEM_WITH_SETJMP */
3102
3103/**
3104 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3105 *
3106 * @returns The opcode qword.
3107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3108 */
3109DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3110{
3111# ifdef IEM_WITH_CODE_TLB
3112 uint64_t u64;
3113 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3114 return u64;
3115# else
3116 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3117 if (rcStrict == VINF_SUCCESS)
3118 {
3119 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3120 pVCpu->iem.s.offOpcode = offOpcode + 8;
3121# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3122 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3123# else
3124 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3125 pVCpu->iem.s.abOpcode[offOpcode + 1],
3126 pVCpu->iem.s.abOpcode[offOpcode + 2],
3127 pVCpu->iem.s.abOpcode[offOpcode + 3],
3128 pVCpu->iem.s.abOpcode[offOpcode + 4],
3129 pVCpu->iem.s.abOpcode[offOpcode + 5],
3130 pVCpu->iem.s.abOpcode[offOpcode + 6],
3131 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3132# endif
3133 }
3134 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3135# endif
3136}
3137
3138
3139/**
3140 * Fetches the next opcode qword, longjmp on error.
3141 *
3142 * @returns The opcode qword.
3143 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3144 */
3145DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3146{
3147# ifdef IEM_WITH_CODE_TLB
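    /* With the code TLB the qword is read straight from the instruction buffer
       when fully available; the slow helper at the end of the function handles
       buffer refills and longjmps on failure. */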
3148 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3149 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3150 if (RT_LIKELY( pbBuf != NULL
3151 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3152 {
3153 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3154# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3155 return *(uint64_t const *)&pbBuf[offBuf];
3156# else
3157 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3158 pbBuf[offBuf + 1],
3159 pbBuf[offBuf + 2],
3160 pbBuf[offBuf + 3],
3161 pbBuf[offBuf + 4],
3162 pbBuf[offBuf + 5],
3163 pbBuf[offBuf + 6],
3164 pbBuf[offBuf + 7]);
3165# endif
3166 }
3167# else
3168 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3169 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3170 {
3171 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3172# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3173 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3174# else
3175 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3176 pVCpu->iem.s.abOpcode[offOpcode + 1],
3177 pVCpu->iem.s.abOpcode[offOpcode + 2],
3178 pVCpu->iem.s.abOpcode[offOpcode + 3],
3179 pVCpu->iem.s.abOpcode[offOpcode + 4],
3180 pVCpu->iem.s.abOpcode[offOpcode + 5],
3181 pVCpu->iem.s.abOpcode[offOpcode + 6],
3182 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3183# endif
3184 }
3185# endif
3186 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3187}
3188
3189#endif /* IEM_WITH_SETJMP */
3190
3191/**
3192 * Fetches the next opcode quad word, returns automatically on failure.
3193 *
3194 * @param a_pu64 Where to return the opcode quad word.
3195 * @remark Implicitly references pVCpu.
3196 */
3197#ifndef IEM_WITH_SETJMP
3198# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3199 do \
3200 { \
3201 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3202 if (rcStrict2 != VINF_SUCCESS) \
3203 return rcStrict2; \
3204 } while (0)
3205#else
3206# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3207#endif
3208
3209
3210/** @name Misc Worker Functions.
3211 * @{
3212 */
3213
3214/* Currently used only with nested hw.virt. */
3215#ifdef VBOX_WITH_NESTED_HWVIRT
3216/**
3217 * Initiates a CPU shutdown sequence.
3218 *
3219 * @returns Strict VBox status code.
3220 * @param pVCpu The cross context virtual CPU structure of the
3221 * calling thread.
3222 */
3223IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3224{
3225 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3226 {
3227 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3228 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3229 }
3230
3231 RT_NOREF_PV(pVCpu);
3232 /** @todo Probably need a separate error code and handling for this to
3233 * distinguish it from the regular triple fault. */
3234 return VINF_EM_TRIPLE_FAULT;
3235}
3236#endif
3237
3238/**
3239 * Validates a new SS segment.
3240 *
3241 * @returns VBox strict status code.
3242 * @param pVCpu The cross context virtual CPU structure of the
3243 * calling thread.
3244 * @param pCtx The CPU context.
3245 * @param NewSS The new SS selector.
3246 * @param uCpl The CPL to load the stack for.
3247 * @param pDesc Where to return the descriptor.
3248 */
3249IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3250{
3251 NOREF(pCtx);
3252
3253 /* Null selectors are not allowed (we're not called for dispatching
3254 interrupts with SS=0 in long mode). */
3255 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3256 {
3257 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3258 return iemRaiseTaskSwitchFault0(pVCpu);
3259 }
3260
3261 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3262 if ((NewSS & X86_SEL_RPL) != uCpl)
3263 {
3264 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3265 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3266 }
3267
3268 /*
3269 * Read the descriptor.
3270 */
3271 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3272 if (rcStrict != VINF_SUCCESS)
3273 return rcStrict;
3274
3275 /*
3276 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3277 */
3278 if (!pDesc->Legacy.Gen.u1DescType)
3279 {
3280 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3281 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3282 }
3283
3284 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3285 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3286 {
3287 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3288 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3289 }
3290 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3291 {
3292 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3293 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3294 }
3295
3296 /* Is it there? */
3297 /** @todo testcase: Is this checked before the canonical / limit check below? */
3298 if (!pDesc->Legacy.Gen.u1Present)
3299 {
3300 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3301 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3302 }
3303
3304 return VINF_SUCCESS;
3305}
3306
3307
3308/**
3309 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3310 * not.
3311 *
3312 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3313 * @param a_pCtx The CPU context.
3314 */
3315#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3316# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3317 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3318 ? (a_pCtx)->eflags.u \
3319 : CPUMRawGetEFlags(a_pVCpu) )
3320#else
3321# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3322 ( (a_pCtx)->eflags.u )
3323#endif
3324
3325/**
3326 * Updates the EFLAGS in the correct manner wrt. PATM.
3327 *
3328 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3329 * @param a_pCtx The CPU context.
3330 * @param a_fEfl The new EFLAGS.
3331 */
3332#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3333# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3334 do { \
3335 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3336 (a_pCtx)->eflags.u = (a_fEfl); \
3337 else \
3338 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3339 } while (0)
3340#else
3341# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3342 do { \
3343 (a_pCtx)->eflags.u = (a_fEfl); \
3344 } while (0)
3345#endif
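/* Typical usage (see the exception delivery code below): read the flags with
 * IEMMISC_GET_EFL, modify them, and write them back with IEMMISC_SET_EFL so
 * that PATM-managed flag bits are handled correctly in raw mode:
 *     uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
 *     fEfl &= ~X86_EFL_IF;
 *     IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
 */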
3346
3347
3348/** @} */
3349
3350/** @name Raising Exceptions.
3351 *
3352 * @{
3353 */
3354
3355
3356/**
3357 * Loads the specified stack far pointer from the TSS.
3358 *
3359 * @returns VBox strict status code.
3360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3361 * @param pCtx The CPU context.
3362 * @param uCpl The CPL to load the stack for.
3363 * @param pSelSS Where to return the new stack segment.
3364 * @param puEsp Where to return the new stack pointer.
3365 */
3366IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3367 PRTSEL pSelSS, uint32_t *puEsp)
3368{
3369 VBOXSTRICTRC rcStrict;
3370 Assert(uCpl < 4);
3371
3372 switch (pCtx->tr.Attr.n.u4Type)
3373 {
3374 /*
3375 * 16-bit TSS (X86TSS16).
3376 */
3377 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); /* fall thru */
3378 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3379 {
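            /* A 16-bit TSS stores the ring stacks as consecutive SP:SS word pairs
               starting at offset 2 (sp0 at 2, ss0 at 4, sp1 at 6, ...), hence
               uCpl * 4 + 2 below. */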
3380 uint32_t off = uCpl * 4 + 2;
3381 if (off + 4 <= pCtx->tr.u32Limit)
3382 {
3383 /** @todo check actual access pattern here. */
3384 uint32_t u32Tmp = 0; /* gcc maybe... */
3385 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3386 if (rcStrict == VINF_SUCCESS)
3387 {
3388 *puEsp = RT_LOWORD(u32Tmp);
3389 *pSelSS = RT_HIWORD(u32Tmp);
3390 return VINF_SUCCESS;
3391 }
3392 }
3393 else
3394 {
3395 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3396 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3397 }
3398 break;
3399 }
3400
3401 /*
3402 * 32-bit TSS (X86TSS32).
3403 */
3404 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); /* fall thru */
3405 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3406 {
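            /* A 32-bit TSS stores the ring stacks as 8-byte ESP:SS pairs starting
               at offset 4 (esp0 at 4, ss0 at 8, esp1 at 12, ...), hence
               uCpl * 8 + 4 below. */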
3407 uint32_t off = uCpl * 8 + 4;
3408 if (off + 7 <= pCtx->tr.u32Limit)
3409 {
3410/** @todo check actual access pattern here. */
3411 uint64_t u64Tmp;
3412 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3413 if (rcStrict == VINF_SUCCESS)
3414 {
3415 *puEsp = u64Tmp & UINT32_MAX;
3416 *pSelSS = (RTSEL)(u64Tmp >> 32);
3417 return VINF_SUCCESS;
3418 }
3419 }
3420 else
3421 {
3422 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3423 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3424 }
3425 break;
3426 }
3427
3428 default:
3429 AssertFailed();
3430 rcStrict = VERR_IEM_IPE_4;
3431 break;
3432 }
3433
3434 *puEsp = 0; /* make gcc happy */
3435 *pSelSS = 0; /* make gcc happy */
3436 return rcStrict;
3437}
3438
3439
3440/**
3441 * Loads the specified stack pointer from the 64-bit TSS.
3442 *
3443 * @returns VBox strict status code.
3444 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3445 * @param pCtx The CPU context.
3446 * @param uCpl The CPL to load the stack for.
3447 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3448 * @param puRsp Where to return the new stack pointer.
3449 */
3450IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3451{
3452 Assert(uCpl < 4);
3453 Assert(uIst < 8);
3454 *puRsp = 0; /* make gcc happy */
3455
3456 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3457
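    /* The 64-bit TSS keeps RSP0..RSP2 at offsets 4/12/20 and IST1..IST7 from
       offset 36 onwards; pick the slot depending on whether an IST index was
       supplied. */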
3458 uint32_t off;
3459 if (uIst)
3460 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3461 else
3462 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3463 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3464 {
3465 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3466 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3467 }
3468
3469 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3470}
3471
3472
3473/**
3474 * Adjust the CPU state according to the exception being raised.
3475 *
3476 * @param pCtx The CPU context.
3477 * @param u8Vector The exception that has been raised.
3478 */
3479DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3480{
3481 switch (u8Vector)
3482 {
3483 case X86_XCPT_DB:
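        /* The CPU clears DR7.GD when delivering #DB so the debug exception
           handler can access the debug registers without faulting again. */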
3484 pCtx->dr[7] &= ~X86_DR7_GD;
3485 break;
3486 /** @todo Read the AMD and Intel exception reference... */
3487 }
3488}
3489
3490
3491/**
3492 * Implements exceptions and interrupts for real mode.
3493 *
3494 * @returns VBox strict status code.
3495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3496 * @param pCtx The CPU context.
3497 * @param cbInstr The number of bytes to offset rIP by in the return
3498 * address.
3499 * @param u8Vector The interrupt / exception vector number.
3500 * @param fFlags The flags.
3501 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3502 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3503 */
3504IEM_STATIC VBOXSTRICTRC
3505iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3506 PCPUMCTX pCtx,
3507 uint8_t cbInstr,
3508 uint8_t u8Vector,
3509 uint32_t fFlags,
3510 uint16_t uErr,
3511 uint64_t uCr2)
3512{
3513 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3514 NOREF(uErr); NOREF(uCr2);
3515
3516 /*
3517 * Read the IDT entry.
3518 */
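    /* In real mode each IVT entry is a 4 byte IP:CS far pointer, so vector
       u8Vector lives at IDTR.base + u8Vector * 4 and must fit within the limit. */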
3519 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3520 {
3521 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3522 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3523 }
3524 RTFAR16 Idte;
3525 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3526 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3527 return rcStrict;
3528
3529 /*
3530 * Push the stack frame.
3531 */
3532 uint16_t *pu16Frame;
3533 uint64_t uNewRsp;
3534 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3535 if (rcStrict != VINF_SUCCESS)
3536 return rcStrict;
3537
3538 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3539#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3540 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3541 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3542 fEfl |= UINT16_C(0xf000);
3543#endif
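    /* Real-mode interrupt frame: FLAGS at the highest address, then CS, then
       the return IP ending up at the new SP. */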
3544 pu16Frame[2] = (uint16_t)fEfl;
3545 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3546 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3547 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3548 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3549 return rcStrict;
3550
3551 /*
3552 * Load the vector address into cs:ip and make exception specific state
3553 * adjustments.
3554 */
3555 pCtx->cs.Sel = Idte.sel;
3556 pCtx->cs.ValidSel = Idte.sel;
3557 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3558 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3559 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3560 pCtx->rip = Idte.off;
3561 fEfl &= ~X86_EFL_IF;
3562 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3563
3564 /** @todo do we actually do this in real mode? */
3565 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3566 iemRaiseXcptAdjustState(pCtx, u8Vector);
3567
3568 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3569}
3570
3571
3572/**
3573 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3574 *
3575 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3576 * @param pSReg Pointer to the segment register.
3577 */
3578IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3579{
3580 pSReg->Sel = 0;
3581 pSReg->ValidSel = 0;
3582 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3583 {
3584 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
3585 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3586 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3587 }
3588 else
3589 {
3590 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3591 /** @todo check this on AMD-V */
3592 pSReg->u64Base = 0;
3593 pSReg->u32Limit = 0;
3594 }
3595}
3596
3597
3598/**
3599 * Loads a segment selector during a task switch in V8086 mode.
3600 *
3601 * @param pSReg Pointer to the segment register.
3602 * @param uSel The selector value to load.
3603 */
3604IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3605{
3606 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3607 pSReg->Sel = uSel;
3608 pSReg->ValidSel = uSel;
3609 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3610 pSReg->u64Base = uSel << 4;
3611 pSReg->u32Limit = 0xffff;
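    /* 0xf3 = present, DPL=3, read/write accessed data segment - the fixed
       attributes of a segment register loaded in virtual-8086 mode. */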
3612 pSReg->Attr.u = 0xf3;
3613}
3614
3615
3616/**
3617 * Loads a NULL data selector into a selector register, both the hidden and
3618 * visible parts, in protected mode.
3619 *
3620 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3621 * @param pSReg Pointer to the segment register.
3622 * @param uRpl The RPL.
3623 */
3624IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3625{
3626 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3627 * data selector in protected mode. */
3628 pSReg->Sel = uRpl;
3629 pSReg->ValidSel = uRpl;
3630 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3631 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3632 {
3633 /* VT-x (Intel 3960x) observed doing something like this. */
3634 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3635 pSReg->u32Limit = UINT32_MAX;
3636 pSReg->u64Base = 0;
3637 }
3638 else
3639 {
3640 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3641 pSReg->u32Limit = 0;
3642 pSReg->u64Base = 0;
3643 }
3644}
3645
3646
3647/**
3648 * Loads a segment selector during a task switch in protected mode.
3649 *
3650 * In this task switch scenario, we would throw \#TS exceptions rather than
3651 * \#GPs.
3652 *
3653 * @returns VBox strict status code.
3654 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3655 * @param pSReg Pointer to the segment register.
3656 * @param uSel The new selector value.
3657 *
3658 * @remarks This does _not_ handle CS or SS.
3659 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3660 */
3661IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3662{
3663 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3664
3665 /* Null data selector. */
3666 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3667 {
3668 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3669 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3670 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3671 return VINF_SUCCESS;
3672 }
3673
3674 /* Fetch the descriptor. */
3675 IEMSELDESC Desc;
3676 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3677 if (rcStrict != VINF_SUCCESS)
3678 {
3679 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3680 VBOXSTRICTRC_VAL(rcStrict)));
3681 return rcStrict;
3682 }
3683
3684 /* Must be a data segment or readable code segment. */
3685 if ( !Desc.Legacy.Gen.u1DescType
3686 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3687 {
3688 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3689 Desc.Legacy.Gen.u4Type));
3690 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3691 }
3692
3693 /* Check privileges for data segments and non-conforming code segments. */
3694 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3695 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3696 {
3697 /* The RPL and the new CPL must be less than or equal to the DPL. */
3698 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3699 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3700 {
3701 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3702 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3703 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3704 }
3705 }
3706
3707 /* Is it there? */
3708 if (!Desc.Legacy.Gen.u1Present)
3709 {
3710 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3711 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3712 }
3713
3714 /* The base and limit. */
3715 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3716 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3717
3718 /*
3719 * Ok, everything checked out fine. Now set the accessed bit before
3720 * committing the result into the registers.
3721 */
3722 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3723 {
3724 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3725 if (rcStrict != VINF_SUCCESS)
3726 return rcStrict;
3727 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3728 }
3729
3730 /* Commit */
3731 pSReg->Sel = uSel;
3732 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3733 pSReg->u32Limit = cbLimit;
3734 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3735 pSReg->ValidSel = uSel;
3736 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3737 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3738 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3739
3740 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3741 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3742 return VINF_SUCCESS;
3743}
3744
3745
3746/**
3747 * Performs a task switch.
3748 *
3749 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3750 * caller is responsible for performing the necessary checks (like DPL, TSS
3751 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3752 * reference for JMP, CALL, IRET.
3753 *
3754 * If the task switch is due to a software interrupt or hardware exception,
3755 * the caller is responsible for validating the TSS selector and descriptor. See
3756 * Intel Instruction reference for INT n.
3757 *
3758 * @returns VBox strict status code.
3759 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3760 * @param pCtx The CPU context.
3761 * @param enmTaskSwitch What caused this task switch.
3762 * @param uNextEip The EIP effective after the task switch.
3763 * @param fFlags The flags.
3764 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3765 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3766 * @param SelTSS The TSS selector of the new task.
3767 * @param pNewDescTSS Pointer to the new TSS descriptor.
3768 */
3769IEM_STATIC VBOXSTRICTRC
3770iemTaskSwitch(PVMCPU pVCpu,
3771 PCPUMCTX pCtx,
3772 IEMTASKSWITCH enmTaskSwitch,
3773 uint32_t uNextEip,
3774 uint32_t fFlags,
3775 uint16_t uErr,
3776 uint64_t uCr2,
3777 RTSEL SelTSS,
3778 PIEMSELDESC pNewDescTSS)
3779{
3780 Assert(!IEM_IS_REAL_MODE(pVCpu));
3781 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3782
3783 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3784 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3785 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3786 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3787 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3788
3789 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3790 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3791
3792 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3793 fIsNewTSS386, pCtx->eip, uNextEip));
3794
3795 /* Update CR2 in case it's a page-fault. */
3796 /** @todo This should probably be done much earlier in IEM/PGM. See
3797 * @bugref{5653#c49}. */
3798 if (fFlags & IEM_XCPT_FLAGS_CR2)
3799 pCtx->cr2 = uCr2;
3800
3801 /*
3802 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3803 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3804 */
3805 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3806 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3807 if (uNewTSSLimit < uNewTSSLimitMin)
3808 {
3809 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3810 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3811 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3812 }
3813
3814 /*
3815 * Check the current TSS limit. The last write to the current TSS during the
3816 * task switch is 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3817 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3818 *
3819 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3820 * end up with smaller than "legal" TSS limits.
3821 */
3822 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3823 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3824 if (uCurTSSLimit < uCurTSSLimitMin)
3825 {
3826 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3827 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3828 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3829 }
3830
3831 /*
3832 * Verify that the new TSS can be accessed and map it. Map only the required contents
3833 * and not the entire TSS.
3834 */
3835 void *pvNewTSS;
3836 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
3837 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
3838 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
3839 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
3840 * not perform correct translation if this happens. See Intel spec. 7.2.1
3841 * "Task-State Segment" */
3842 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
3843 if (rcStrict != VINF_SUCCESS)
3844 {
3845 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
3846 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
3847 return rcStrict;
3848 }
3849
3850 /*
3851 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
3852 */
3853 uint32_t u32EFlags = pCtx->eflags.u32;
3854 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
3855 || enmTaskSwitch == IEMTASKSWITCH_IRET)
3856 {
3857 PX86DESC pDescCurTSS;
3858 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
3859 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3860 if (rcStrict != VINF_SUCCESS)
3861 {
3862 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3863 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3864 return rcStrict;
3865 }
3866
3867 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3868 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
3869 if (rcStrict != VINF_SUCCESS)
3870 {
3871 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3872 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3873 return rcStrict;
3874 }
3875
3876 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
3877 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
3878 {
3879 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3880 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3881 u32EFlags &= ~X86_EFL_NT;
3882 }
3883 }
3884
3885 /*
3886 * Save the CPU state into the current TSS.
3887 */
3888 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
3889 if (GCPtrNewTSS == GCPtrCurTSS)
3890 {
3891 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
3892 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
3893 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
3894 }
3895 if (fIsNewTSS386)
3896 {
3897 /*
3898 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
3899 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3900 */
3901 void *pvCurTSS32;
3902 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
3903 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
3904 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
3905 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3906 if (rcStrict != VINF_SUCCESS)
3907 {
3908 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3909 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3910 return rcStrict;
3911 }
3912
3913 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
3914 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
3915 pCurTSS32->eip = uNextEip;
3916 pCurTSS32->eflags = u32EFlags;
3917 pCurTSS32->eax = pCtx->eax;
3918 pCurTSS32->ecx = pCtx->ecx;
3919 pCurTSS32->edx = pCtx->edx;
3920 pCurTSS32->ebx = pCtx->ebx;
3921 pCurTSS32->esp = pCtx->esp;
3922 pCurTSS32->ebp = pCtx->ebp;
3923 pCurTSS32->esi = pCtx->esi;
3924 pCurTSS32->edi = pCtx->edi;
3925 pCurTSS32->es = pCtx->es.Sel;
3926 pCurTSS32->cs = pCtx->cs.Sel;
3927 pCurTSS32->ss = pCtx->ss.Sel;
3928 pCurTSS32->ds = pCtx->ds.Sel;
3929 pCurTSS32->fs = pCtx->fs.Sel;
3930 pCurTSS32->gs = pCtx->gs.Sel;
3931
3932 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
3933 if (rcStrict != VINF_SUCCESS)
3934 {
3935 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3936 VBOXSTRICTRC_VAL(rcStrict)));
3937 return rcStrict;
3938 }
3939 }
3940 else
3941 {
3942 /*
3943 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
3944 */
3945 void *pvCurTSS16;
3946 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
3947 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
3948 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
3949 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3950 if (rcStrict != VINF_SUCCESS)
3951 {
3952 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3953 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3954 return rcStrict;
3955 }
3956
3957 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
3958 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
3959 pCurTSS16->ip = uNextEip;
3960 pCurTSS16->flags = u32EFlags;
3961 pCurTSS16->ax = pCtx->ax;
3962 pCurTSS16->cx = pCtx->cx;
3963 pCurTSS16->dx = pCtx->dx;
3964 pCurTSS16->bx = pCtx->bx;
3965 pCurTSS16->sp = pCtx->sp;
3966 pCurTSS16->bp = pCtx->bp;
3967 pCurTSS16->si = pCtx->si;
3968 pCurTSS16->di = pCtx->di;
3969 pCurTSS16->es = pCtx->es.Sel;
3970 pCurTSS16->cs = pCtx->cs.Sel;
3971 pCurTSS16->ss = pCtx->ss.Sel;
3972 pCurTSS16->ds = pCtx->ds.Sel;
3973
3974 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
3975 if (rcStrict != VINF_SUCCESS)
3976 {
3977 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3978 VBOXSTRICTRC_VAL(rcStrict)));
3979 return rcStrict;
3980 }
3981 }
3982
3983 /*
3984 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
3985 */
3986 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3987 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3988 {
3989 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
3990 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
3991 pNewTSS->selPrev = pCtx->tr.Sel;
3992 }
3993
3994 /*
3995 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
3996 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
3997 */
3998 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
3999 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4000 bool fNewDebugTrap;
4001 if (fIsNewTSS386)
4002 {
4003 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4004 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4005 uNewEip = pNewTSS32->eip;
4006 uNewEflags = pNewTSS32->eflags;
4007 uNewEax = pNewTSS32->eax;
4008 uNewEcx = pNewTSS32->ecx;
4009 uNewEdx = pNewTSS32->edx;
4010 uNewEbx = pNewTSS32->ebx;
4011 uNewEsp = pNewTSS32->esp;
4012 uNewEbp = pNewTSS32->ebp;
4013 uNewEsi = pNewTSS32->esi;
4014 uNewEdi = pNewTSS32->edi;
4015 uNewES = pNewTSS32->es;
4016 uNewCS = pNewTSS32->cs;
4017 uNewSS = pNewTSS32->ss;
4018 uNewDS = pNewTSS32->ds;
4019 uNewFS = pNewTSS32->fs;
4020 uNewGS = pNewTSS32->gs;
4021 uNewLdt = pNewTSS32->selLdt;
4022 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4023 }
4024 else
4025 {
4026 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4027 uNewCr3 = 0;
4028 uNewEip = pNewTSS16->ip;
4029 uNewEflags = pNewTSS16->flags;
4030 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4031 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4032 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4033 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4034 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4035 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4036 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4037 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4038 uNewES = pNewTSS16->es;
4039 uNewCS = pNewTSS16->cs;
4040 uNewSS = pNewTSS16->ss;
4041 uNewDS = pNewTSS16->ds;
4042 uNewFS = 0;
4043 uNewGS = 0;
4044 uNewLdt = pNewTSS16->selLdt;
4045 fNewDebugTrap = false;
4046 }
4047
4048 if (GCPtrNewTSS == GCPtrCurTSS)
4049 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4050 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4051
4052 /*
4053 * We're done accessing the new TSS.
4054 */
4055 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4056 if (rcStrict != VINF_SUCCESS)
4057 {
4058 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4059 return rcStrict;
4060 }
4061
4062 /*
4063 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4064 */
4065 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4066 {
4067 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4068 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4069 if (rcStrict != VINF_SUCCESS)
4070 {
4071 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4072 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4073 return rcStrict;
4074 }
4075
4076 /* Check that the descriptor indicates the new TSS is available (not busy). */
4077 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4078 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4079 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4080
4081 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4082 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4083 if (rcStrict != VINF_SUCCESS)
4084 {
4085 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4086 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4087 return rcStrict;
4088 }
4089 }
4090
4091 /*
4092 * From this point on, we're technically in the new task. We will defer exceptions
4093 * until the completion of the task switch but before executing any instructions in the new task.
4094 */
4095 pCtx->tr.Sel = SelTSS;
4096 pCtx->tr.ValidSel = SelTSS;
4097 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
4098 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4099 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4100 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4101 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4102
4103 /* Set the busy bit in TR. */
4104 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4105 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4106 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4107 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4108 {
4109 uNewEflags |= X86_EFL_NT;
4110 }
4111
4112 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4113 pCtx->cr0 |= X86_CR0_TS;
4114 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4115
4116 pCtx->eip = uNewEip;
4117 pCtx->eax = uNewEax;
4118 pCtx->ecx = uNewEcx;
4119 pCtx->edx = uNewEdx;
4120 pCtx->ebx = uNewEbx;
4121 pCtx->esp = uNewEsp;
4122 pCtx->ebp = uNewEbp;
4123 pCtx->esi = uNewEsi;
4124 pCtx->edi = uNewEdi;
4125
4126 uNewEflags &= X86_EFL_LIVE_MASK;
4127 uNewEflags |= X86_EFL_RA1_MASK;
4128 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
4129
4130 /*
4131 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4132 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4133 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4134 */
4135 pCtx->es.Sel = uNewES;
4136 pCtx->es.Attr.u &= ~X86DESCATTR_P;
4137
4138 pCtx->cs.Sel = uNewCS;
4139 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
4140
4141 pCtx->ss.Sel = uNewSS;
4142 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
4143
4144 pCtx->ds.Sel = uNewDS;
4145 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
4146
4147 pCtx->fs.Sel = uNewFS;
4148 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
4149
4150 pCtx->gs.Sel = uNewGS;
4151 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
4152 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4153
4154 pCtx->ldtr.Sel = uNewLdt;
4155 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4156 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
4157 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4158
4159 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4160 {
4161 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
4162 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
4163 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
4164 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
4165 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
4166 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
4167 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4168 }
4169
4170 /*
4171 * Switch CR3 for the new task.
4172 */
4173 if ( fIsNewTSS386
4174 && (pCtx->cr0 & X86_CR0_PG))
4175 {
4176 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4177 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4178 {
4179 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4180 AssertRCSuccessReturn(rc, rc);
4181 }
4182 else
4183 pCtx->cr3 = uNewCr3;
4184
4185 /* Inform PGM. */
4186 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4187 {
4188 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4189 AssertRCReturn(rc, rc);
4190 /* ignore informational status codes */
4191 }
4192 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4193 }
4194
4195 /*
4196 * Switch LDTR for the new task.
4197 */
4198 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4199 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4200 else
4201 {
4202 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4203
4204 IEMSELDESC DescNewLdt;
4205 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4206 if (rcStrict != VINF_SUCCESS)
4207 {
4208 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4209 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4210 return rcStrict;
4211 }
4212 if ( !DescNewLdt.Legacy.Gen.u1Present
4213 || DescNewLdt.Legacy.Gen.u1DescType
4214 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4215 {
4216 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4217 uNewLdt, DescNewLdt.Legacy.u));
4218 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4219 }
4220
4221 pCtx->ldtr.ValidSel = uNewLdt;
4222 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4223 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4224 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4225 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4226 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4227 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4228 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4229 }
4230
4231 IEMSELDESC DescSS;
4232 if (IEM_IS_V86_MODE(pVCpu))
4233 {
4234 pVCpu->iem.s.uCpl = 3;
4235 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4236 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4237 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4238 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4239 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4240 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4241
4242 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4243 DescSS.Legacy.u = 0;
4244 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4245 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4246 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4247 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4248 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4249 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4250 DescSS.Legacy.Gen.u2Dpl = 3;
4251 }
4252 else
4253 {
4254 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4255
4256 /*
4257 * Load the stack segment for the new task.
4258 */
4259 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4260 {
4261 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4262 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4263 }
4264
4265 /* Fetch the descriptor. */
4266 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4267 if (rcStrict != VINF_SUCCESS)
4268 {
4269 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4270 VBOXSTRICTRC_VAL(rcStrict)));
4271 return rcStrict;
4272 }
4273
4274 /* SS must be a data segment and writable. */
4275 if ( !DescSS.Legacy.Gen.u1DescType
4276 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4277 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4278 {
4279 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4280 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4281 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4282 }
4283
4284 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4285 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4286 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4287 {
4288 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4289 uNewCpl));
4290 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4291 }
4292
4293 /* Is it there? */
4294 if (!DescSS.Legacy.Gen.u1Present)
4295 {
4296 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4297 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4298 }
4299
4300 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4301 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4302
4303 /* Set the accessed bit before committing the result into SS. */
4304 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4305 {
4306 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4307 if (rcStrict != VINF_SUCCESS)
4308 return rcStrict;
4309 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4310 }
4311
4312 /* Commit SS. */
4313 pCtx->ss.Sel = uNewSS;
4314 pCtx->ss.ValidSel = uNewSS;
4315 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4316 pCtx->ss.u32Limit = cbLimit;
4317 pCtx->ss.u64Base = u64Base;
4318 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4319 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4320
4321 /* CPL has changed, update IEM before loading rest of segments. */
4322 pVCpu->iem.s.uCpl = uNewCpl;
4323
4324 /*
4325 * Load the data segments for the new task.
4326 */
4327 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4328 if (rcStrict != VINF_SUCCESS)
4329 return rcStrict;
4330 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4331 if (rcStrict != VINF_SUCCESS)
4332 return rcStrict;
4333 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4334 if (rcStrict != VINF_SUCCESS)
4335 return rcStrict;
4336 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4337 if (rcStrict != VINF_SUCCESS)
4338 return rcStrict;
4339
4340 /*
4341 * Load the code segment for the new task.
4342 */
4343 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4344 {
4345 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4346 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4347 }
4348
4349 /* Fetch the descriptor. */
4350 IEMSELDESC DescCS;
4351 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4352 if (rcStrict != VINF_SUCCESS)
4353 {
4354 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4355 return rcStrict;
4356 }
4357
4358 /* CS must be a code segment. */
4359 if ( !DescCS.Legacy.Gen.u1DescType
4360 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4361 {
4362 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4363 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4364 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4365 }
4366
4367 /* For conforming CS, DPL must be less than or equal to the RPL. */
4368 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4369 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4370 {
4371 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4372 DescCS.Legacy.Gen.u2Dpl));
4373 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4374 }
4375
4376 /* For non-conforming CS, DPL must match RPL. */
4377 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4378 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4379 {
4380 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4381 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4382 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4383 }
4384
4385 /* Is it there? */
4386 if (!DescCS.Legacy.Gen.u1Present)
4387 {
4388 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4389 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4390 }
4391
4392 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4393 u64Base = X86DESC_BASE(&DescCS.Legacy);
4394
4395 /* Set the accessed bit before committing the result into CS. */
4396 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4397 {
4398 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4399 if (rcStrict != VINF_SUCCESS)
4400 return rcStrict;
4401 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4402 }
4403
4404 /* Commit CS. */
4405 pCtx->cs.Sel = uNewCS;
4406 pCtx->cs.ValidSel = uNewCS;
4407 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4408 pCtx->cs.u32Limit = cbLimit;
4409 pCtx->cs.u64Base = u64Base;
4410 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4411 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4412 }
4413
4414 /** @todo Debug trap. */
4415 if (fIsNewTSS386 && fNewDebugTrap)
4416 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4417
4418 /*
4419 * Construct the error code masks based on what caused this task switch.
4420 * See Intel Instruction reference for INT.
4421 */
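    /* uExt becomes the EXT bit (bit 0) of any error code raised further down;
       it is set when the event did not originate from a software interrupt. */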
4422 uint16_t uExt;
4423 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4424 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4425 {
4426 uExt = 1;
4427 }
4428 else
4429 uExt = 0;
4430
4431 /*
4432 * Push any error code on to the new stack.
4433 */
4434 if (fFlags & IEM_XCPT_FLAGS_ERR)
4435 {
4436 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4437 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4438 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4439
4440 /* Check that there is sufficient space on the stack. */
4441 /** @todo Factor out segment limit checking for normal/expand down segments
4442 * into a separate function. */
4443 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4444 {
4445 if ( pCtx->esp - 1 > cbLimitSS
4446 || pCtx->esp < cbStackFrame)
4447 {
4448 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4449 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4450 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4451 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4452 }
4453 }
4454 else
4455 {
4456 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4457 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4458 {
4459 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4460 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4461 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4462 }
4463 }
4464
4465
4466 if (fIsNewTSS386)
4467 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4468 else
4469 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4470 if (rcStrict != VINF_SUCCESS)
4471 {
4472 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4473 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4474 return rcStrict;
4475 }
4476 }
4477
4478 /* Check the new EIP against the new CS limit. */
4479 if (pCtx->eip > pCtx->cs.u32Limit)
4480 {
4481 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4482 pCtx->eip, pCtx->cs.u32Limit));
4483 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4484 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4485 }
4486
4487 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4488 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4489}
4490
4491
4492/**
4493 * Implements exceptions and interrupts for protected mode.
4494 *
4495 * @returns VBox strict status code.
4496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4497 * @param pCtx The CPU context.
4498 * @param cbInstr The number of bytes to offset rIP by in the return
4499 * address.
4500 * @param u8Vector The interrupt / exception vector number.
4501 * @param fFlags The flags.
4502 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4503 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4504 */
4505IEM_STATIC VBOXSTRICTRC
4506iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4507 PCPUMCTX pCtx,
4508 uint8_t cbInstr,
4509 uint8_t u8Vector,
4510 uint32_t fFlags,
4511 uint16_t uErr,
4512 uint64_t uCr2)
4513{
4514 /*
4515 * Read the IDT entry.
4516 */
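    /* Protected-mode IDT entries are 8-byte gate descriptors, so vector
       u8Vector lives at IDTR.base + u8Vector * 8. */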
4517 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4518 {
4519 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4520 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4521 }
4522 X86DESC Idte;
4523 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4524 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4525 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4526 return rcStrict;
4527 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4528 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4529 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4530
4531 /*
4532 * Check the descriptor type, DPL and such.
4533 * ASSUMES this is done in the same order as described for call-gate calls.
4534 */
4535 if (Idte.Gate.u1DescType)
4536 {
4537 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4538 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4539 }
4540 bool fTaskGate = false;
4541 uint8_t f32BitGate = true;
4542 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4543 switch (Idte.Gate.u4Type)
4544 {
4545 case X86_SEL_TYPE_SYS_UNDEFINED:
4546 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4547 case X86_SEL_TYPE_SYS_LDT:
4548 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4549 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4550 case X86_SEL_TYPE_SYS_UNDEFINED2:
4551 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4552 case X86_SEL_TYPE_SYS_UNDEFINED3:
4553 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4554 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4555 case X86_SEL_TYPE_SYS_UNDEFINED4:
4556 {
4557 /** @todo check what actually happens when the type is wrong...
4558 * esp. call gates. */
4559 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4560 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4561 }
4562
4563 case X86_SEL_TYPE_SYS_286_INT_GATE:
4564 f32BitGate = false;
4565 /* fall thru */
4566 case X86_SEL_TYPE_SYS_386_INT_GATE:
4567 fEflToClear |= X86_EFL_IF;
4568 break;
4569
4570 case X86_SEL_TYPE_SYS_TASK_GATE:
4571 fTaskGate = true;
4572#ifndef IEM_IMPLEMENTS_TASKSWITCH
4573 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4574#endif
4575 break;
4576
4577 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4578 f32BitGate = false; /* fall thru */
4579 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4580 break;
4581
4582 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4583 }
4584
4585 /* Check DPL against CPL if applicable. */
4586 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4587 {
4588 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4589 {
4590 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4591 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4592 }
4593 }
4594
4595 /* Is it there? */
4596 if (!Idte.Gate.u1Present)
4597 {
4598 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4599 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4600 }
4601
4602 /* Is it a task-gate? */
4603 if (fTaskGate)
4604 {
4605 /*
4606 * Construct the error code masks based on what caused this task switch.
4607 * See Intel Instruction reference for INT.
4608 */
4609 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4610 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4611 RTSEL SelTSS = Idte.Gate.u16Sel;
4612
4613 /*
4614 * Fetch the TSS descriptor in the GDT.
4615 */
4616 IEMSELDESC DescTSS;
4617 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4618 if (rcStrict != VINF_SUCCESS)
4619 {
4620 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4621 VBOXSTRICTRC_VAL(rcStrict)));
4622 return rcStrict;
4623 }
4624
4625 /* The TSS descriptor must be a system segment and be available (not busy). */
4626 if ( DescTSS.Legacy.Gen.u1DescType
4627 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4628 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4629 {
4630 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4631 u8Vector, SelTSS, DescTSS.Legacy.au64));
4632 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4633 }
4634
4635 /* The TSS must be present. */
4636 if (!DescTSS.Legacy.Gen.u1Present)
4637 {
4638 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4639 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4640 }
4641
4642 /* Do the actual task switch. */
4643 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4644 }
4645
4646 /* A null CS is bad. */
4647 RTSEL NewCS = Idte.Gate.u16Sel;
4648 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4649 {
4650 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4651 return iemRaiseGeneralProtectionFault0(pVCpu);
4652 }
4653
4654 /* Fetch the descriptor for the new CS. */
4655 IEMSELDESC DescCS;
4656 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4657 if (rcStrict != VINF_SUCCESS)
4658 {
4659 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4660 return rcStrict;
4661 }
4662
4663 /* Must be a code segment. */
4664 if (!DescCS.Legacy.Gen.u1DescType)
4665 {
4666 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4667 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4668 }
4669 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4670 {
4671 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4672 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4673 }
4674
4675 /* Don't allow lowering the privilege level. */
4676 /** @todo Does the lowering of privileges apply to software interrupts
4677     *        only?  This has a bearing on the more-privileged or
4678 * same-privilege stack behavior further down. A testcase would
4679 * be nice. */
4680 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4681 {
4682 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4683 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4684 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4685 }
4686
4687 /* Make sure the selector is present. */
4688 if (!DescCS.Legacy.Gen.u1Present)
4689 {
4690 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4691 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4692 }
4693
4694 /* Check the new EIP against the new CS limit. */
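    /* 286 gates only supply a 16-bit offset; 386 gates combine the low and high offset words. */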
4695 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4696 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4697 ? Idte.Gate.u16OffsetLow
4698 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4699 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4700 if (uNewEip > cbLimitCS)
4701 {
4702 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4703 u8Vector, uNewEip, cbLimitCS, NewCS));
4704 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4705 }
4706 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4707
4708 /* Calc the flag image to push. */
4709 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4710 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4711 fEfl &= ~X86_EFL_RF;
4712 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4713 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4714
4715 /* From V8086 mode only go to CPL 0. */
4716 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4717 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4718 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4719 {
4720 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4721 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4722 }
4723
4724 /*
4725 * If the privilege level changes, we need to get a new stack from the TSS.
4726 * This in turns means validating the new SS and ESP...
4727 */
4728 if (uNewCpl != pVCpu->iem.s.uCpl)
4729 {
4730 RTSEL NewSS;
4731 uint32_t uNewEsp;
4732 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4733 if (rcStrict != VINF_SUCCESS)
4734 return rcStrict;
4735
4736 IEMSELDESC DescSS;
4737 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4738 if (rcStrict != VINF_SUCCESS)
4739 return rcStrict;
4740 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4741 if (!DescSS.Legacy.Gen.u1DefBig)
4742 {
4743 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4744 uNewEsp = (uint16_t)uNewEsp;
4745 }
4746
4747 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4748
4749 /* Check that there is sufficient space for the stack frame. */
4750 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
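            /* Frame: [error code,] EIP, CS, EFLAGS, ESP, SS, and from V8086 mode also ES, DS, FS and GS.
               Each entry is 2 bytes for a 16-bit gate and 4 bytes for a 32-bit gate (hence the f32BitGate shift). */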
4751 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4752 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4753 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4754
4755 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4756 {
4757 if ( uNewEsp - 1 > cbLimitSS
4758 || uNewEsp < cbStackFrame)
4759 {
4760 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4761 u8Vector, NewSS, uNewEsp, cbStackFrame));
4762 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4763 }
4764 }
4765 else
4766 {
4767 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4768 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4769 {
4770 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4771 u8Vector, NewSS, uNewEsp, cbStackFrame));
4772 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4773 }
4774 }
4775
4776 /*
4777 * Start making changes.
4778 */
4779
4780 /* Set the new CPL so that stack accesses use it. */
4781 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4782 pVCpu->iem.s.uCpl = uNewCpl;
4783
4784 /* Create the stack frame. */
4785 RTPTRUNION uStackFrame;
4786 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4787 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4788 if (rcStrict != VINF_SUCCESS)
4789 return rcStrict;
4790 void * const pvStackFrame = uStackFrame.pv;
4791 if (f32BitGate)
4792 {
4793 if (fFlags & IEM_XCPT_FLAGS_ERR)
4794 *uStackFrame.pu32++ = uErr;
4795 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4796 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4797 uStackFrame.pu32[2] = fEfl;
4798 uStackFrame.pu32[3] = pCtx->esp;
4799 uStackFrame.pu32[4] = pCtx->ss.Sel;
4800 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
4801 if (fEfl & X86_EFL_VM)
4802 {
4803 uStackFrame.pu32[1] = pCtx->cs.Sel;
4804 uStackFrame.pu32[5] = pCtx->es.Sel;
4805 uStackFrame.pu32[6] = pCtx->ds.Sel;
4806 uStackFrame.pu32[7] = pCtx->fs.Sel;
4807 uStackFrame.pu32[8] = pCtx->gs.Sel;
4808 }
4809 }
4810 else
4811 {
4812 if (fFlags & IEM_XCPT_FLAGS_ERR)
4813 *uStackFrame.pu16++ = uErr;
4814 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4815 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4816 uStackFrame.pu16[2] = fEfl;
4817 uStackFrame.pu16[3] = pCtx->sp;
4818 uStackFrame.pu16[4] = pCtx->ss.Sel;
4819 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
4820 if (fEfl & X86_EFL_VM)
4821 {
4822 uStackFrame.pu16[1] = pCtx->cs.Sel;
4823 uStackFrame.pu16[5] = pCtx->es.Sel;
4824 uStackFrame.pu16[6] = pCtx->ds.Sel;
4825 uStackFrame.pu16[7] = pCtx->fs.Sel;
4826 uStackFrame.pu16[8] = pCtx->gs.Sel;
4827 }
4828 }
4829 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4830 if (rcStrict != VINF_SUCCESS)
4831 return rcStrict;
4832
4833 /* Mark the selectors 'accessed' (hope this is the correct time). */
4834        /** @todo testcase: exactly _when_ are the accessed bits set - before or
4835 * after pushing the stack frame? (Write protect the gdt + stack to
4836 * find out.) */
4837 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4838 {
4839 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4840 if (rcStrict != VINF_SUCCESS)
4841 return rcStrict;
4842 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4843 }
4844
4845 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4846 {
4847 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
4848 if (rcStrict != VINF_SUCCESS)
4849 return rcStrict;
4850 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4851 }
4852
4853 /*
4854         * Start committing the register changes (joins with the DPL=CPL branch).
4855 */
4856 pCtx->ss.Sel = NewSS;
4857 pCtx->ss.ValidSel = NewSS;
4858 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4859 pCtx->ss.u32Limit = cbLimitSS;
4860 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
4861 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4862 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
4863 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
4864 * SP is loaded).
4865 * Need to check the other combinations too:
4866 * - 16-bit TSS, 32-bit handler
4867 * - 32-bit TSS, 16-bit handler */
4868 if (!pCtx->ss.Attr.n.u1DefBig)
4869 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
4870 else
4871 pCtx->rsp = uNewEsp - cbStackFrame;
4872
4873 if (fEfl & X86_EFL_VM)
4874 {
4875 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
4876 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
4877 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
4878 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
4879 }
4880 }
4881 /*
4882 * Same privilege, no stack change and smaller stack frame.
4883 */
4884 else
4885 {
4886 uint64_t uNewRsp;
4887 RTPTRUNION uStackFrame;
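        /* Frame: [error code,] EIP, CS, EFLAGS; 2 bytes per entry for a 16-bit gate, 4 bytes for a 32-bit gate. */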
4888 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
4889 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
4890 if (rcStrict != VINF_SUCCESS)
4891 return rcStrict;
4892 void * const pvStackFrame = uStackFrame.pv;
4893
4894 if (f32BitGate)
4895 {
4896 if (fFlags & IEM_XCPT_FLAGS_ERR)
4897 *uStackFrame.pu32++ = uErr;
4898 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4899 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4900 uStackFrame.pu32[2] = fEfl;
4901 }
4902 else
4903 {
4904 if (fFlags & IEM_XCPT_FLAGS_ERR)
4905 *uStackFrame.pu16++ = uErr;
4906 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4907 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4908 uStackFrame.pu16[2] = fEfl;
4909 }
4910 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
4911 if (rcStrict != VINF_SUCCESS)
4912 return rcStrict;
4913
4914 /* Mark the CS selector as 'accessed'. */
4915 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4916 {
4917 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4918 if (rcStrict != VINF_SUCCESS)
4919 return rcStrict;
4920 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4921 }
4922
4923 /*
4924 * Start committing the register changes (joins with the other branch).
4925 */
4926 pCtx->rsp = uNewRsp;
4927 }
4928
4929 /* ... register committing continues. */
4930 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4931 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4932 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4933 pCtx->cs.u32Limit = cbLimitCS;
4934 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4935 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4936
4937 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
4938 fEfl &= ~fEflToClear;
4939 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
4940
4941 if (fFlags & IEM_XCPT_FLAGS_CR2)
4942 pCtx->cr2 = uCr2;
4943
4944 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4945 iemRaiseXcptAdjustState(pCtx, u8Vector);
4946
4947 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4948}
4949
4950
4951/**
4952 * Implements exceptions and interrupts for long mode.
4953 *
4954 * @returns VBox strict status code.
4955 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4956 * @param pCtx The CPU context.
4957 * @param cbInstr The number of bytes to offset rIP by in the return
4958 * address.
4959 * @param u8Vector The interrupt / exception vector number.
4960 * @param fFlags The flags.
4961 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4962 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4963 */
4964IEM_STATIC VBOXSTRICTRC
4965iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
4966 PCPUMCTX pCtx,
4967 uint8_t cbInstr,
4968 uint8_t u8Vector,
4969 uint32_t fFlags,
4970 uint16_t uErr,
4971 uint64_t uCr2)
4972{
4973 /*
4974 * Read the IDT entry.
4975 */
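    /* Long-mode IDT entries are 16 bytes each, so the vector is scaled by 16 and both halves are fetched below. */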
4976 uint16_t offIdt = (uint16_t)u8Vector << 4;
4977 if (pCtx->idtr.cbIdt < offIdt + 7)
4978 {
4979 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4980 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4981 }
4982 X86DESC64 Idte;
4983 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
4984 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
4985 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
4986 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4987 return rcStrict;
4988 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
4989 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4990 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4991
4992 /*
4993 * Check the descriptor type, DPL and such.
4994 * ASSUMES this is done in the same order as described for call-gate calls.
4995 */
4996 if (Idte.Gate.u1DescType)
4997 {
4998 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4999 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5000 }
5001 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5002 switch (Idte.Gate.u4Type)
5003 {
5004 case AMD64_SEL_TYPE_SYS_INT_GATE:
5005 fEflToClear |= X86_EFL_IF;
5006 break;
5007 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5008 break;
5009
5010 default:
5011 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5012 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5013 }
5014
5015 /* Check DPL against CPL if applicable. */
5016 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5017 {
5018 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5019 {
5020 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5021 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5022 }
5023 }
5024
5025 /* Is it there? */
5026 if (!Idte.Gate.u1Present)
5027 {
5028 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5029 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5030 }
5031
5032 /* A null CS is bad. */
5033 RTSEL NewCS = Idte.Gate.u16Sel;
5034 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5035 {
5036 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5037 return iemRaiseGeneralProtectionFault0(pVCpu);
5038 }
5039
5040 /* Fetch the descriptor for the new CS. */
5041 IEMSELDESC DescCS;
5042 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5043 if (rcStrict != VINF_SUCCESS)
5044 {
5045 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5046 return rcStrict;
5047 }
5048
5049 /* Must be a 64-bit code segment. */
5050 if (!DescCS.Long.Gen.u1DescType)
5051 {
5052 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5053 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5054 }
5055 if ( !DescCS.Long.Gen.u1Long
5056 || DescCS.Long.Gen.u1DefBig
5057 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5058 {
5059 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5060 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5061 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5062 }
5063
5064 /* Don't allow lowering the privilege level. For non-conforming CS
5065 selectors, the CS.DPL sets the privilege level the trap/interrupt
5066 handler runs at. For conforming CS selectors, the CPL remains
5067 unchanged, but the CS.DPL must be <= CPL. */
5068 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5069 * when CPU in Ring-0. Result \#GP? */
5070 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5071 {
5072 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5073 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5074 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5075 }
5076
5077
5078 /* Make sure the selector is present. */
5079 if (!DescCS.Legacy.Gen.u1Present)
5080 {
5081 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5082 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5083 }
5084
5085 /* Check that the new RIP is canonical. */
5086 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5087 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5088 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5089 if (!IEM_IS_CANONICAL(uNewRip))
5090 {
5091 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5092 return iemRaiseGeneralProtectionFault0(pVCpu);
5093 }
5094
5095 /*
5096 * If the privilege level changes or if the IST isn't zero, we need to get
5097 * a new stack from the TSS.
5098 */
5099 uint64_t uNewRsp;
5100 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5101 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5102 if ( uNewCpl != pVCpu->iem.s.uCpl
5103 || Idte.Gate.u3IST != 0)
5104 {
5105 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5106 if (rcStrict != VINF_SUCCESS)
5107 return rcStrict;
5108 }
5109 else
5110 uNewRsp = pCtx->rsp;
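    /* In 64-bit mode the CPU aligns the new RSP down to a 16-byte boundary before pushing the frame. */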
5111 uNewRsp &= ~(uint64_t)0xf;
5112
5113 /*
5114 * Calc the flag image to push.
5115 */
5116 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
5117 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5118 fEfl &= ~X86_EFL_RF;
5119 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
5120 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5121
5122 /*
5123 * Start making changes.
5124 */
5125 /* Set the new CPL so that stack accesses use it. */
5126 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5127 pVCpu->iem.s.uCpl = uNewCpl;
5128
5129 /* Create the stack frame. */
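    /* Frame: RIP, CS, RFLAGS, RSP and SS (5 qwords), plus one qword for the error code when present. */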
5130 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
5131 RTPTRUNION uStackFrame;
5132 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5133 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5134 if (rcStrict != VINF_SUCCESS)
5135 return rcStrict;
5136 void * const pvStackFrame = uStackFrame.pv;
5137
5138 if (fFlags & IEM_XCPT_FLAGS_ERR)
5139 *uStackFrame.pu64++ = uErr;
5140 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
5141 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5142 uStackFrame.pu64[2] = fEfl;
5143 uStackFrame.pu64[3] = pCtx->rsp;
5144 uStackFrame.pu64[4] = pCtx->ss.Sel;
5145 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5146 if (rcStrict != VINF_SUCCESS)
5147 return rcStrict;
5148
5149    /* Mark the CS selector as 'accessed' (hope this is the correct time). */
5150    /** @todo testcase: exactly _when_ are the accessed bits set - before or
5151 * after pushing the stack frame? (Write protect the gdt + stack to
5152 * find out.) */
5153 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5154 {
5155 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5156 if (rcStrict != VINF_SUCCESS)
5157 return rcStrict;
5158 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5159 }
5160
5161 /*
5162     * Start committing the register changes.
5163 */
5164 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5165 * hidden registers when interrupting 32-bit or 16-bit code! */
5166 if (uNewCpl != uOldCpl)
5167 {
5168 pCtx->ss.Sel = 0 | uNewCpl;
5169 pCtx->ss.ValidSel = 0 | uNewCpl;
5170 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5171 pCtx->ss.u32Limit = UINT32_MAX;
5172 pCtx->ss.u64Base = 0;
5173 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5174 }
5175 pCtx->rsp = uNewRsp - cbStackFrame;
5176 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5177 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5178 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5179 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5180 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5181 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5182 pCtx->rip = uNewRip;
5183
5184 fEfl &= ~fEflToClear;
5185 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5186
5187 if (fFlags & IEM_XCPT_FLAGS_CR2)
5188 pCtx->cr2 = uCr2;
5189
5190 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5191 iemRaiseXcptAdjustState(pCtx, u8Vector);
5192
5193 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5194}
5195
5196
5197/**
5198 * Implements exceptions and interrupts.
5199 *
5200 * All exceptions and interrupts go thru this function!
5201 *
5202 * @returns VBox strict status code.
5203 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5204 * @param cbInstr The number of bytes to offset rIP by in the return
5205 * address.
5206 * @param u8Vector The interrupt / exception vector number.
5207 * @param fFlags The flags.
5208 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5209 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5210 */
5211DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5212iemRaiseXcptOrInt(PVMCPU pVCpu,
5213 uint8_t cbInstr,
5214 uint8_t u8Vector,
5215 uint32_t fFlags,
5216 uint16_t uErr,
5217 uint64_t uCr2)
5218{
5219 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5220#ifdef IN_RING0
5221 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5222 AssertRCReturn(rc, rc);
5223#endif
5224
5225#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5226 /*
5227 * Flush prefetch buffer
5228 */
5229 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5230#endif
5231
5232 /*
5233 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5234 */
5235 if ( pCtx->eflags.Bits.u1VM
5236 && pCtx->eflags.Bits.u2IOPL != 3
5237 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5238 && (pCtx->cr0 & X86_CR0_PE) )
5239 {
5240 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5241 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5242 u8Vector = X86_XCPT_GP;
5243 uErr = 0;
5244 }
5245#ifdef DBGFTRACE_ENABLED
5246 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5247 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5248 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5249#endif
5250
5251#ifdef VBOX_WITH_NESTED_HWVIRT
5252 if (IEM_IS_SVM_ENABLED(pVCpu))
5253 {
5254 /*
5255 * Handle nested-guest SVM exception and software interrupt intercepts,
5256 * see AMD spec. 15.12 "Exception Intercepts".
5257 *
5258 * - NMI intercepts have their own exit code and do not cause SVM_EXIT_EXCEPTION_2 #VMEXITs.
5259 * - External interrupts and software interrupts (INTn instruction) do not check the exception intercepts
5260 * even when they use a vector in the range 0 to 31.
5261 * - ICEBP should not trigger #DB intercept, but its own intercept, so we catch it early in iemOp_int1.
5262 * - For #PF exceptions, its intercept is checked before CR2 is written by the exception.
5263 */
5264 /* Check NMI intercept */
5265 if ( u8Vector == X86_XCPT_NMI
5266 && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_NMI))
5267 {
5268 Log(("iemRaiseXcptOrInt: NMI intercept -> #VMEXIT\n"));
5269 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5270 }
5271
5272 /* Check CPU exception intercepts. */
5273 if ( IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, u8Vector)
5274 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
5275 {
5276 Assert(u8Vector <= X86_XCPT_LAST);
5277 uint64_t const uExitInfo1 = fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0;
5278 uint64_t const uExitInfo2 = fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0;
5279 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist
5280 && u8Vector == X86_XCPT_PF
5281 && !(uErr & X86_TRAP_PF_ID))
5282 {
5283 /** @todo Nested-guest SVM - figure out fetching op-code bytes from IEM. */
5284#ifdef IEM_WITH_CODE_TLB
5285#else
5286 uint8_t const offOpCode = pVCpu->iem.s.offOpcode;
5287 uint8_t const cbCurrent = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode;
5288 if ( cbCurrent > 0
5289 && cbCurrent < sizeof(pCtx->hwvirt.svm.VmcbCtrl.abInstr))
5290 {
5291 Assert(cbCurrent <= sizeof(pVCpu->iem.s.abOpcode));
5292 memcpy(&pCtx->hwvirt.svm.VmcbCtrl.abInstr[0], &pVCpu->iem.s.abOpcode[offOpCode], cbCurrent);
5293 }
5294#endif
5295 }
5296 Log(("iemRaiseXcptOrInt: Xcpt intercept (u8Vector=%#x uExitInfo1=%#RX64, uExitInfo2=%#RX64 -> #VMEXIT\n", u8Vector,
5297 uExitInfo1, uExitInfo2));
5298 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + u8Vector, uExitInfo1, uExitInfo2);
5299 }
5300
5301 /* Check software interrupt (INTn) intercepts. */
5302 if ( IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INTN)
5303 && (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
5304 {
5305 uint64_t const uExitInfo1 = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? u8Vector : 0;
5306 Log(("iemRaiseXcptOrInt: Software INT intercept (u8Vector=%#x) -> #VMEXIT\n", u8Vector));
5307 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SWINT, uExitInfo1, 0 /* uExitInfo2 */);
5308 }
5309 }
5310#endif /* VBOX_WITH_NESTED_HWVIRT */
5311
5312 /*
5313 * Do recursion accounting.
5314 */
5315 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5316 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5317 if (pVCpu->iem.s.cXcptRecursions == 0)
5318 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5319 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5320 else
5321 {
5322 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5323 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt, pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5324
5325        /** @todo double and triple faults. */
5326 /** @todo When implementing \#DF, the SVM nested-guest \#DF intercepts needs
5327 * some care. See AMD spec. 15.12 "Exception Intercepts". */
5328 if (pVCpu->iem.s.cXcptRecursions >= 3)
5329 {
5330#ifdef DEBUG_bird
5331 AssertFailed();
5332#endif
5333 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5334 }
5335
5336 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
5337 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
5338 {
5339 ....
5340 } */
5341 }
5342 pVCpu->iem.s.cXcptRecursions++;
5343 pVCpu->iem.s.uCurXcpt = u8Vector;
5344 pVCpu->iem.s.fCurXcpt = fFlags;
5345 pVCpu->iem.s.uCurXcptErr = uErr;
5346 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5347
5348 /*
5349 * Extensive logging.
5350 */
5351#if defined(LOG_ENABLED) && defined(IN_RING3)
5352 if (LogIs3Enabled())
5353 {
5354 PVM pVM = pVCpu->CTX_SUFF(pVM);
5355 char szRegs[4096];
5356 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5357 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5358 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5359 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5360 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5361 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5362 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5363 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5364 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5365 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5366 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5367 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5368 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5369 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5370 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5371 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5372 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5373 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5374 " efer=%016VR{efer}\n"
5375 " pat=%016VR{pat}\n"
5376 " sf_mask=%016VR{sf_mask}\n"
5377 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5378 " lstar=%016VR{lstar}\n"
5379 " star=%016VR{star} cstar=%016VR{cstar}\n"
5380 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5381 );
5382
5383 char szInstr[256];
5384 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5385 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5386 szInstr, sizeof(szInstr), NULL);
5387 Log3(("%s%s\n", szRegs, szInstr));
5388 }
5389#endif /* LOG_ENABLED */
5390
5391 /*
5392 * Call the mode specific worker function.
5393 */
5394 VBOXSTRICTRC rcStrict;
5395 if (!(pCtx->cr0 & X86_CR0_PE))
5396 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5397 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5398 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5399 else
5400 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5401
5402 /* Flush the prefetch buffer. */
5403#ifdef IEM_WITH_CODE_TLB
5404 pVCpu->iem.s.pbInstrBuf = NULL;
5405#else
5406 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5407#endif
5408
5409 /*
5410 * Unwind.
5411 */
5412 pVCpu->iem.s.cXcptRecursions--;
5413 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5414 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5415 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5416 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5417 return rcStrict;
5418}
5419
5420#ifdef IEM_WITH_SETJMP
5421/**
5422 * See iemRaiseXcptOrInt. Will not return.
5423 */
5424IEM_STATIC DECL_NO_RETURN(void)
5425iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5426 uint8_t cbInstr,
5427 uint8_t u8Vector,
5428 uint32_t fFlags,
5429 uint16_t uErr,
5430 uint64_t uCr2)
5431{
5432 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5433 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5434}
5435#endif
5436
5437
5438/** \#DE - 00. */
5439DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5440{
5441 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5442}
5443
5444
5445/** \#DB - 01.
5446 *  @note This automatically clears DR7.GD.  */
5447DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5448{
5449 /** @todo set/clear RF. */
5450 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5451 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5452}
5453
5454
5455/** \#BR - 05. */
5456DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5457{
5458 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5459}
5460
5461
5462/** \#UD - 06. */
5463DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5464{
5465 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5466}
5467
5468
5469/** \#NM - 07. */
5470DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5471{
5472 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5473}
5474
5475
5476/** \#TS(err) - 0a. */
5477DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5478{
5479 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5480}
5481
5482
5483/** \#TS(tr) - 0a. */
5484DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5485{
5486 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5487 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5488}
5489
5490
5491/** \#TS(0) - 0a. */
5492DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5493{
5494 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5495 0, 0);
5496}
5497
5498
5499/** \#TS(err) - 0a. */
5500DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5501{
5502 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5503 uSel & X86_SEL_MASK_OFF_RPL, 0);
5504}
5505
5506
5507/** \#NP(err) - 0b. */
5508DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5509{
5510 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5511}
5512
5513
5514/** \#NP(sel) - 0b. */
5515DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5516{
5517 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5518 uSel & ~X86_SEL_RPL, 0);
5519}
5520
5521
5522/** \#SS(seg) - 0c. */
5523DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5524{
5525 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5526 uSel & ~X86_SEL_RPL, 0);
5527}
5528
5529
5530/** \#SS(err) - 0c. */
5531DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5532{
5533 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5534}
5535
5536
5537/** \#GP(n) - 0d. */
5538DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5539{
5540 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5541}
5542
5543
5544/** \#GP(0) - 0d. */
5545DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5546{
5547 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5548}
5549
5550#ifdef IEM_WITH_SETJMP
5551/** \#GP(0) - 0d. */
5552DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5553{
5554 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5555}
5556#endif
5557
5558
5559/** \#GP(sel) - 0d. */
5560DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5561{
5562 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5563 Sel & ~X86_SEL_RPL, 0);
5564}
5565
5566
5567/** \#GP(0) - 0d. */
5568DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5569{
5570 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5571}
5572
5573
5574/** \#GP(sel) - 0d. */
5575DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5576{
5577 NOREF(iSegReg); NOREF(fAccess);
5578 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5579 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5580}
5581
5582#ifdef IEM_WITH_SETJMP
5583/** \#GP(sel) - 0d, longjmp. */
5584DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5585{
5586 NOREF(iSegReg); NOREF(fAccess);
5587 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5588 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5589}
5590#endif
5591
5592/** \#GP(sel) - 0d. */
5593DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5594{
5595 NOREF(Sel);
5596 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5597}
5598
5599#ifdef IEM_WITH_SETJMP
5600/** \#GP(sel) - 0d, longjmp. */
5601DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5602{
5603 NOREF(Sel);
5604 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5605}
5606#endif
5607
5608
5609/** \#GP(sel) - 0d. */
5610DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5611{
5612 NOREF(iSegReg); NOREF(fAccess);
5613 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5614}
5615
5616#ifdef IEM_WITH_SETJMP
5617/** \#GP(sel) - 0d, longjmp. */
5618DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5619 uint32_t fAccess)
5620{
5621 NOREF(iSegReg); NOREF(fAccess);
5622 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5623}
5624#endif
5625
5626
5627/** \#PF(n) - 0e. */
5628DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5629{
5630 uint16_t uErr;
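    /* Build the \#PF error code: P is set for protection violations and clear when the page (or a paging
       structure) was not present; US, RW and ID are added below as appropriate. */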
5631 switch (rc)
5632 {
5633 case VERR_PAGE_NOT_PRESENT:
5634 case VERR_PAGE_TABLE_NOT_PRESENT:
5635 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5636 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5637 uErr = 0;
5638 break;
5639
5640 default:
5641 AssertMsgFailed(("%Rrc\n", rc));
5642 /* fall thru */
5643 case VERR_ACCESS_DENIED:
5644 uErr = X86_TRAP_PF_P;
5645 break;
5646
5647 /** @todo reserved */
5648 }
5649
5650 if (pVCpu->iem.s.uCpl == 3)
5651 uErr |= X86_TRAP_PF_US;
5652
5653 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5654 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5655 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5656 uErr |= X86_TRAP_PF_ID;
5657
5658#if 0 /* This is so much nonsense, really.  Why was it done like that? */
5659 /* Note! RW access callers reporting a WRITE protection fault, will clear
5660 the READ flag before calling. So, read-modify-write accesses (RW)
5661 can safely be reported as READ faults. */
5662 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5663 uErr |= X86_TRAP_PF_RW;
5664#else
5665 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5666 {
5667 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5668 uErr |= X86_TRAP_PF_RW;
5669 }
5670#endif
5671
5672 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5673 uErr, GCPtrWhere);
5674}
5675
5676#ifdef IEM_WITH_SETJMP
5677/** \#PF(n) - 0e, longjmp. */
5678IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5679{
5680 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5681}
5682#endif
5683
5684
5685/** \#MF(0) - 10. */
5686DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5687{
5688 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5689}
5690
5691
5692/** \#AC(0) - 11. */
5693DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5694{
5695 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5696}
5697
5698
5699/**
5700 * Macro for calling iemCImplRaiseDivideError().
5701 *
5702 * This enables us to add/remove arguments and force different levels of
5703 * inlining as we wish.
5704 *
5705 * @return Strict VBox status code.
5706 */
5707#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5708IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5709{
5710 NOREF(cbInstr);
5711 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5712}
5713
5714
5715/**
5716 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5717 *
5718 * This enables us to add/remove arguments and force different levels of
5719 * inlining as we wish.
5720 *
5721 * @return Strict VBox status code.
5722 */
5723#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5724IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5725{
5726 NOREF(cbInstr);
5727 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5728}
5729
5730
5731/**
5732 * Macro for calling iemCImplRaiseInvalidOpcode().
5733 *
5734 * This enables us to add/remove arguments and force different levels of
5735 * inlining as we wish.
5736 *
5737 * @return Strict VBox status code.
5738 */
5739#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5740IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5741{
5742 NOREF(cbInstr);
5743 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5744}
5745
5746
5747/** @} */
5748
5749
5750/*
5751 *
5752 * Helper routines.
5753 * Helper routines.
5754 * Helper routines.
5755 *
5756 */
5757
5758/**
5759 * Recalculates the effective operand size.
5760 *
5761 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5762 */
5763IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5764{
5765 switch (pVCpu->iem.s.enmCpuMode)
5766 {
5767 case IEMMODE_16BIT:
5768 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5769 break;
5770 case IEMMODE_32BIT:
5771 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5772 break;
5773 case IEMMODE_64BIT:
5774 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5775 {
5776 case 0:
5777 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5778 break;
5779 case IEM_OP_PRF_SIZE_OP:
5780 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5781 break;
5782 case IEM_OP_PRF_SIZE_REX_W:
5783 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
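                    /* REX.W takes precedence over the 0x66 operand-size prefix. */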
5784 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5785 break;
5786 }
5787 break;
5788 default:
5789 AssertFailed();
5790 }
5791}
5792
5793
5794/**
5795 * Sets the default operand size to 64-bit and recalculates the effective
5796 * operand size.
5797 *
5798 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5799 */
5800IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5801{
5802 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5803 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
5804 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5805 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5806 else
5807 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5808}
5809
5810
5811/*
5812 *
5813 * Common opcode decoders.
5814 * Common opcode decoders.
5815 * Common opcode decoders.
5816 *
5817 */
5818//#include <iprt/mem.h>
5819
5820/**
5821 * Used to add extra details about a stub case.
5822 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5823 */
5824IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
5825{
5826#if defined(LOG_ENABLED) && defined(IN_RING3)
5827 PVM pVM = pVCpu->CTX_SUFF(pVM);
5828 char szRegs[4096];
5829 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5830 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5831 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5832 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5833 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5834 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5835 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5836 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5837 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5838 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5839 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5840 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5841 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5842 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5843 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5844 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5845 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5846 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5847 " efer=%016VR{efer}\n"
5848 " pat=%016VR{pat}\n"
5849 " sf_mask=%016VR{sf_mask}\n"
5850 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5851 " lstar=%016VR{lstar}\n"
5852 " star=%016VR{star} cstar=%016VR{cstar}\n"
5853 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5854 );
5855
5856 char szInstr[256];
5857 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5858 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5859 szInstr, sizeof(szInstr), NULL);
5860
5861 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
5862#else
5863 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs, IEM_GET_CTX(pVCpu)->rip);
5864#endif
5865}
5866
5867/**
5868 * Complains about a stub.
5869 *
5870 * Two versions of this macro are provided: one for daily use and one for use
5871 * when working on IEM.
5872 */
5873#if 0
5874# define IEMOP_BITCH_ABOUT_STUB() \
5875 do { \
5876 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
5877 iemOpStubMsg2(pVCpu); \
5878 RTAssertPanic(); \
5879 } while (0)
5880#else
5881# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
5882#endif
5883
5884/** Stubs an opcode. */
5885#define FNIEMOP_STUB(a_Name) \
5886 FNIEMOP_DEF(a_Name) \
5887 { \
5888 RT_NOREF_PV(pVCpu); \
5889 IEMOP_BITCH_ABOUT_STUB(); \
5890 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5891 } \
5892 typedef int ignore_semicolon
5893
5894/** Stubs an opcode. */
5895#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
5896 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5897 { \
5898 RT_NOREF_PV(pVCpu); \
5899 RT_NOREF_PV(a_Name0); \
5900 IEMOP_BITCH_ABOUT_STUB(); \
5901 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5902 } \
5903 typedef int ignore_semicolon
5904
5905/** Stubs an opcode which currently should raise \#UD. */
5906#define FNIEMOP_UD_STUB(a_Name) \
5907 FNIEMOP_DEF(a_Name) \
5908 { \
5909 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5910 return IEMOP_RAISE_INVALID_OPCODE(); \
5911 } \
5912 typedef int ignore_semicolon
5913
5914/** Stubs an opcode which currently should raise \#UD. */
5915#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
5916 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5917 { \
5918 RT_NOREF_PV(pVCpu); \
5919 RT_NOREF_PV(a_Name0); \
5920 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5921 return IEMOP_RAISE_INVALID_OPCODE(); \
5922 } \
5923 typedef int ignore_semicolon
5924
5925
5926
5927/** @name Register Access.
5928 * @{
5929 */
5930
5931/**
5932 * Gets a reference (pointer) to the specified hidden segment register.
5933 *
5934 * @returns Hidden register reference.
5935 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5936 * @param iSegReg The segment register.
5937 */
5938IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
5939{
5940 Assert(iSegReg < X86_SREG_COUNT);
5941 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5942 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
5943
5944#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5945 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
5946 { /* likely */ }
5947 else
5948 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5949#else
5950 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5951#endif
5952 return pSReg;
5953}
5954
5955
5956/**
5957 * Ensures that the given hidden segment register is up to date.
5958 *
5959 * @returns Hidden register reference.
5960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5961 * @param pSReg The segment register.
5962 */
5963IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
5964{
5965#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5966 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
5967 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5968#else
5969 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5970 NOREF(pVCpu);
5971#endif
5972 return pSReg;
5973}
5974
5975
5976/**
5977 * Gets a reference (pointer) to the specified segment register (the selector
5978 * value).
5979 *
5980 * @returns Pointer to the selector variable.
5981 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5982 * @param iSegReg The segment register.
5983 */
5984DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
5985{
5986 Assert(iSegReg < X86_SREG_COUNT);
5987 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5988 return &pCtx->aSRegs[iSegReg].Sel;
5989}
5990
5991
5992/**
5993 * Fetches the selector value of a segment register.
5994 *
5995 * @returns The selector value.
5996 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5997 * @param iSegReg The segment register.
5998 */
5999DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6000{
6001 Assert(iSegReg < X86_SREG_COUNT);
6002 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
6003}
6004
6005
6006/**
6007 * Gets a reference (pointer) to the specified general purpose register.
6008 *
6009 * @returns Register reference.
6010 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6011 * @param iReg The general purpose register.
6012 */
6013DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6014{
6015 Assert(iReg < 16);
6016 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6017 return &pCtx->aGRegs[iReg];
6018}
6019
6020
6021/**
6022 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6023 *
6024 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6025 *
6026 * @returns Register reference.
6027 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6028 * @param iReg The register.
6029 */
6030DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6031{
6032 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6033 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6034 {
6035 Assert(iReg < 16);
6036 return &pCtx->aGRegs[iReg].u8;
6037 }
6038 /* high 8-bit register. */
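    /* Without a REX prefix, register encodings 4-7 select AH, CH, DH and BH, i.e. the high byte
       of the first four GPRs (iReg & 3). */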
6039 Assert(iReg < 8);
6040 return &pCtx->aGRegs[iReg & 3].bHi;
6041}
6042
6043
6044/**
6045 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6046 *
6047 * @returns Register reference.
6048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6049 * @param iReg The register.
6050 */
6051DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6052{
6053 Assert(iReg < 16);
6054 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6055 return &pCtx->aGRegs[iReg].u16;
6056}
6057
6058
6059/**
6060 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6061 *
6062 * @returns Register reference.
6063 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6064 * @param iReg The register.
6065 */
6066DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6067{
6068 Assert(iReg < 16);
6069 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6070 return &pCtx->aGRegs[iReg].u32;
6071}
6072
6073
6074/**
6075 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6076 *
6077 * @returns Register reference.
6078 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6079 * @param iReg The register.
6080 */
6081DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6082{
6083    Assert(iReg < 16);
6084 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6085 return &pCtx->aGRegs[iReg].u64;
6086}
6087
6088
6089/**
6090 * Fetches the value of an 8-bit general purpose register.
6091 *
6092 * @returns The register value.
6093 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6094 * @param iReg The register.
6095 */
6096DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6097{
6098 return *iemGRegRefU8(pVCpu, iReg);
6099}
6100
6101
6102/**
6103 * Fetches the value of a 16-bit general purpose register.
6104 *
6105 * @returns The register value.
6106 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6107 * @param iReg The register.
6108 */
6109DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6110{
6111 Assert(iReg < 16);
6112 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
6113}
6114
6115
6116/**
6117 * Fetches the value of a 32-bit general purpose register.
6118 *
6119 * @returns The register value.
6120 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6121 * @param iReg The register.
6122 */
6123DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6124{
6125 Assert(iReg < 16);
6126 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
6127}
6128
6129
6130/**
6131 * Fetches the value of a 64-bit general purpose register.
6132 *
6133 * @returns The register value.
6134 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6135 * @param iReg The register.
6136 */
6137DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6138{
6139 Assert(iReg < 16);
6140 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
6141}
6142
6143
6144/**
6145 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6146 *
6147 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6148 * segment limit.
6149 *
6150 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6151 * @param offNextInstr The offset of the next instruction.
6152 */
6153IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6154{
6155 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6156 switch (pVCpu->iem.s.enmEffOpSize)
6157 {
6158 case IEMMODE_16BIT:
6159 {
6160 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6161 if ( uNewIp > pCtx->cs.u32Limit
6162 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6163 return iemRaiseGeneralProtectionFault0(pVCpu);
6164 pCtx->rip = uNewIp;
6165 break;
6166 }
6167
6168 case IEMMODE_32BIT:
6169 {
6170 Assert(pCtx->rip <= UINT32_MAX);
6171 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6172
6173 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6174 if (uNewEip > pCtx->cs.u32Limit)
6175 return iemRaiseGeneralProtectionFault0(pVCpu);
6176 pCtx->rip = uNewEip;
6177 break;
6178 }
6179
6180 case IEMMODE_64BIT:
6181 {
6182 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6183
6184 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6185 if (!IEM_IS_CANONICAL(uNewRip))
6186 return iemRaiseGeneralProtectionFault0(pVCpu);
6187 pCtx->rip = uNewRip;
6188 break;
6189 }
6190
6191 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6192 }
6193
6194 pCtx->eflags.Bits.u1RF = 0;
6195
6196#ifndef IEM_WITH_CODE_TLB
6197 /* Flush the prefetch buffer. */
6198 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6199#endif
6200
6201 return VINF_SUCCESS;
6202}
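
/*
 * Editorial example (hedged, not part of the original source or the build): the
 * rel8 displacement above is applied to the address of the *next* instruction,
 * i.e. target = IP + cbInstr + (int8_t)rel8, truncated to the effective operand
 * size.  Standalone sketch of the 16-bit case with made-up values:
 */
#if 0
# include <stdint.h>
# include <stdio.h>
int main(void)
{
    uint16_t const uIp          = 0x1000; /* IP of the JMP rel8 instruction. */
    uint8_t  const cbInstr      = 2;      /* EB xx is two bytes long. */
    int8_t   const offNextInstr = -0x10;  /* Signed displacement from the next instruction. */
    uint16_t const uNewIp       = (uint16_t)(uIp + cbInstr + offNextInstr);
    printf("new IP=%#x\n", uNewIp);       /* Prints 0xff2; checked against CS.limit before use. */
    return 0;
}
#endif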
6203
6204
6205/**
6206 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6207 *
6208 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6209 * segment limit.
6210 *
6211 * @returns Strict VBox status code.
6212 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6213 * @param offNextInstr The offset of the next instruction.
6214 */
6215IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6216{
6217 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6218 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6219
6220 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6221 if ( uNewIp > pCtx->cs.u32Limit
6222 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6223 return iemRaiseGeneralProtectionFault0(pVCpu);
6224 /** @todo Test 16-bit jump in 64-bit mode. Is it possible? */
6225 pCtx->rip = uNewIp;
6226 pCtx->eflags.Bits.u1RF = 0;
6227
6228#ifndef IEM_WITH_CODE_TLB
6229 /* Flush the prefetch buffer. */
6230 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6231#endif
6232
6233 return VINF_SUCCESS;
6234}
6235
6236
6237/**
6238 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6239 *
6240 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6241 * segment limit.
6242 *
6243 * @returns Strict VBox status code.
6244 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6245 * @param offNextInstr The offset of the next instruction.
6246 */
6247IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6248{
6249 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6250 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6251
6252 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6253 {
6254 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6255
6256 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6257 if (uNewEip > pCtx->cs.u32Limit)
6258 return iemRaiseGeneralProtectionFault0(pVCpu);
6259 pCtx->rip = uNewEip;
6260 }
6261 else
6262 {
6263 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6264
6265 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6266 if (!IEM_IS_CANONICAL(uNewRip))
6267 return iemRaiseGeneralProtectionFault0(pVCpu);
6268 pCtx->rip = uNewRip;
6269 }
6270 pCtx->eflags.Bits.u1RF = 0;
6271
6272#ifndef IEM_WITH_CODE_TLB
6273 /* Flush the prefetch buffer. */
6274 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6275#endif
6276
6277 return VINF_SUCCESS;
6278}
6279
6280
6281/**
6282 * Performs a near jump to the specified address.
6283 *
6284 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6285 * segment limit.
6286 *
 * @returns Strict VBox status code.
6287 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6288 * @param uNewRip The new RIP value.
6289 */
6290IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6291{
6292 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6293 switch (pVCpu->iem.s.enmEffOpSize)
6294 {
6295 case IEMMODE_16BIT:
6296 {
6297 Assert(uNewRip <= UINT16_MAX);
6298 if ( uNewRip > pCtx->cs.u32Limit
6299 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6300 return iemRaiseGeneralProtectionFault0(pVCpu);
6301 /** @todo Test 16-bit jump in 64-bit mode. */
6302 pCtx->rip = uNewRip;
6303 break;
6304 }
6305
6306 case IEMMODE_32BIT:
6307 {
6308 Assert(uNewRip <= UINT32_MAX);
6309 Assert(pCtx->rip <= UINT32_MAX);
6310 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6311
6312 if (uNewRip > pCtx->cs.u32Limit)
6313 return iemRaiseGeneralProtectionFault0(pVCpu);
6314 pCtx->rip = uNewRip;
6315 break;
6316 }
6317
6318 case IEMMODE_64BIT:
6319 {
6320 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6321
6322 if (!IEM_IS_CANONICAL(uNewRip))
6323 return iemRaiseGeneralProtectionFault0(pVCpu);
6324 pCtx->rip = uNewRip;
6325 break;
6326 }
6327
6328 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6329 }
6330
6331 pCtx->eflags.Bits.u1RF = 0;
6332
6333#ifndef IEM_WITH_CODE_TLB
6334 /* Flush the prefetch buffer. */
6335 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6336#endif
6337
6338 return VINF_SUCCESS;
6339}
6340
6341
6342/**
6343 * Gets the address of the top of the stack.
6344 *
6345 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6346 * @param pCtx The CPU context from which SP/ESP/RSP should be
6347 * read.
6348 */
6349DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6350{
6351 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6352 return pCtx->rsp;
6353 if (pCtx->ss.Attr.n.u1DefBig)
6354 return pCtx->esp;
6355 return pCtx->sp;
6356}
6357
6358
6359/**
6360 * Updates the RIP/EIP/IP to point to the next instruction.
6361 *
6362 * This function leaves the EFLAGS.RF flag alone.
6363 *
6364 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6365 * @param cbInstr The number of bytes to add.
6366 */
6367IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6368{
6369 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6370 switch (pVCpu->iem.s.enmCpuMode)
6371 {
6372 case IEMMODE_16BIT:
6373 Assert(pCtx->rip <= UINT16_MAX);
6374 pCtx->eip += cbInstr;
6375 pCtx->eip &= UINT32_C(0xffff);
6376 break;
6377
6378 case IEMMODE_32BIT:
6379 pCtx->eip += cbInstr;
6380 Assert(pCtx->rip <= UINT32_MAX);
6381 break;
6382
6383 case IEMMODE_64BIT:
6384 pCtx->rip += cbInstr;
6385 break;
6386 default: AssertFailed();
6387 }
6388}
6389
6390
6391#if 0
6392/**
6393 * Updates the RIP/EIP/IP to point to the next instruction.
6394 *
6395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6396 */
6397IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6398{
6399 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6400}
6401#endif
6402
6403
6404
6405/**
6406 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6407 *
6408 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6409 * @param cbInstr The number of bytes to add.
6410 */
6411IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6412{
6413 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6414
6415 pCtx->eflags.Bits.u1RF = 0;
6416
6417 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6418#if ARCH_BITS >= 64
6419 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
6420 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6421 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6422#else
6423 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6424 pCtx->rip += cbInstr;
6425 else
6426 {
6427 static uint32_t const s_aEipMasks[] = { UINT32_C(0xffff), UINT32_MAX };
6428 pCtx->eip = (pCtx->eip + cbInstr) & s_aEipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6429 }
6430#endif
6431}
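
/*
 * Editorial example (hedged, not from the original source): the mask table above
 * relies on IEMMODE_16BIT..IEMMODE_64BIT being 0..2 (see the AssertCompile) so the
 * mode can index masks of 0xffff, 0xffffffff and UINT64_MAX directly.  Standalone
 * sketch with a stand-in enum (all example* names are made up), kept out of the build:
 */
#if 0
# include <stdint.h>
# include <assert.h>
typedef enum { EXAMPLEMODE_16BIT = 0, EXAMPLEMODE_32BIT = 1, EXAMPLEMODE_64BIT = 2 } EXAMPLEMODE;

static uint64_t exampleAdvanceRip(uint64_t uRip, uint8_t cbInstr, EXAMPLEMODE enmMode)
{
    static uint64_t const s_aMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
    return (uRip + cbInstr) & s_aMasks[enmMode];
}

int main(void)
{
    assert(exampleAdvanceRip(UINT64_C(0xfffe),     3, EXAMPLEMODE_16BIT) == 0x0001);     /* IP wraps at 64K. */
    assert(exampleAdvanceRip(UINT64_C(0xfffffffe), 3, EXAMPLEMODE_32BIT) == 0x00000001); /* EIP wraps at 4G. */
    assert(exampleAdvanceRip(UINT64_C(0xfffffffe), 3, EXAMPLEMODE_64BIT) == UINT64_C(0x100000001));
    return 0;
}
#endif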
6432
6433
6434/**
6435 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6436 *
6437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6438 */
6439IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6440{
6441 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6442}
6443
6444
6445/**
6446 * Adds to the stack pointer.
6447 *
6448 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6449 * @param pCtx The CPU context in which SP/ESP/RSP should be
6450 * updated.
6451 * @param cbToAdd The number of bytes to add (8-bit!).
6452 */
6453DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6454{
6455 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6456 pCtx->rsp += cbToAdd;
6457 else if (pCtx->ss.Attr.n.u1DefBig)
6458 pCtx->esp += cbToAdd;
6459 else
6460 pCtx->sp += cbToAdd;
6461}
6462
6463
6464/**
6465 * Subtracts from the stack pointer.
6466 *
6467 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6468 * @param pCtx The CPU context in which SP/ESP/RSP should be
6469 * updated.
6470 * @param cbToSub The number of bytes to subtract (8-bit!).
6471 */
6472DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6473{
6474 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6475 pCtx->rsp -= cbToSub;
6476 else if (pCtx->ss.Attr.n.u1DefBig)
6477 pCtx->esp -= cbToSub;
6478 else
6479 pCtx->sp -= cbToSub;
6480}
6481
6482
6483/**
6484 * Adds to the temporary stack pointer.
6485 *
6486 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6487 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6488 * @param cbToAdd The number of bytes to add (16-bit).
6489 * @param pCtx Where to get the current stack mode.
6490 */
6491DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6492{
6493 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6494 pTmpRsp->u += cbToAdd;
6495 else if (pCtx->ss.Attr.n.u1DefBig)
6496 pTmpRsp->DWords.dw0 += cbToAdd;
6497 else
6498 pTmpRsp->Words.w0 += cbToAdd;
6499}
6500
6501
6502/**
6503 * Subtracts from the temporary stack pointer.
6504 *
6505 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6506 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6507 * @param cbToSub The number of bytes to subtract.
6508 * @param pCtx Where to get the current stack mode.
6509 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6510 * expecting that.
6511 */
6512DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6513{
6514 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6515 pTmpRsp->u -= cbToSub;
6516 else if (pCtx->ss.Attr.n.u1DefBig)
6517 pTmpRsp->DWords.dw0 -= cbToSub;
6518 else
6519 pTmpRsp->Words.w0 -= cbToSub;
6520}
6521
6522
6523/**
6524 * Calculates the effective stack address for a push of the specified size as
6525 * well as the new RSP value (upper bits may be masked).
6526 *
6527 * @returns Effective stack address for the push.
6528 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6529 * @param pCtx Where to get the current stack mode.
6530 * @param cbItem The size of the stack item to push.
6531 * @param puNewRsp Where to return the new RSP value.
6532 */
6533DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6534{
6535 RTUINT64U uTmpRsp;
6536 RTGCPTR GCPtrTop;
6537 uTmpRsp.u = pCtx->rsp;
6538
6539 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6540 GCPtrTop = uTmpRsp.u -= cbItem;
6541 else if (pCtx->ss.Attr.n.u1DefBig)
6542 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6543 else
6544 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6545 *puNewRsp = uTmpRsp.u;
6546 return GCPtrTop;
6547}
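
/*
 * Editorial example (hedged, not from the original source): only the part of RSP
 * selected by the current stack attributes is decremented, so a 16-bit stack wraps
 * within the low word while the upper bits of RSP are preserved.  Standalone sketch
 * (RTUINT64U replaced by a local union; assumes a little-endian host, as on x86):
 */
#if 0
# include <stdint.h>
# include <assert.h>
typedef union { uint64_t u; uint16_t au16[4]; uint32_t au32[2]; } EXAMPLEU64;

int main(void)
{
    EXAMPLEU64 uTmpRsp;
    uTmpRsp.u = UINT64_C(0x0000000000010000);          /* SP=0x0000 with a non-zero bit above it. */
    uint8_t  const cbItem   = 2;                       /* Pushing a word. */
    uint16_t const GCPtrTop = uTmpRsp.au16[0] -= cbItem;
    assert(GCPtrTop == 0xfffe);                        /* The write goes to SS:0xFFFE... */
    assert(uTmpRsp.u == UINT64_C(0x000000000001fffe)); /* ...and RSP bits 16+ are untouched. */
    return 0;
}
#endif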
6548
6549
6550/**
6551 * Gets the current stack pointer and calculates the value after a pop of the
6552 * specified size.
6553 *
6554 * @returns Current stack pointer.
6555 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6556 * @param pCtx Where to get the current stack mode.
6557 * @param cbItem The size of the stack item to pop.
6558 * @param puNewRsp Where to return the new RSP value.
6559 */
6560DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6561{
6562 RTUINT64U uTmpRsp;
6563 RTGCPTR GCPtrTop;
6564 uTmpRsp.u = pCtx->rsp;
6565
6566 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6567 {
6568 GCPtrTop = uTmpRsp.u;
6569 uTmpRsp.u += cbItem;
6570 }
6571 else if (pCtx->ss.Attr.n.u1DefBig)
6572 {
6573 GCPtrTop = uTmpRsp.DWords.dw0;
6574 uTmpRsp.DWords.dw0 += cbItem;
6575 }
6576 else
6577 {
6578 GCPtrTop = uTmpRsp.Words.w0;
6579 uTmpRsp.Words.w0 += cbItem;
6580 }
6581 *puNewRsp = uTmpRsp.u;
6582 return GCPtrTop;
6583}
6584
6585
6586/**
6587 * Calculates the effective stack address for a push of the specified size as
6588 * well as the new temporary RSP value (upper bits may be masked).
6589 *
6590 * @returns Effective stack address for the push.
6591 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6592 * @param pCtx Where to get the current stack mode.
6593 * @param pTmpRsp The temporary stack pointer. This is updated.
6594 * @param cbItem The size of the stack item to push.
6595 */
6596DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6597{
6598 RTGCPTR GCPtrTop;
6599
6600 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6601 GCPtrTop = pTmpRsp->u -= cbItem;
6602 else if (pCtx->ss.Attr.n.u1DefBig)
6603 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6604 else
6605 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6606 return GCPtrTop;
6607}
6608
6609
6610/**
6611 * Gets the effective stack address for a pop of the specified size and
6612 * calculates and updates the temporary RSP.
6613 *
6614 * @returns Current stack pointer.
6615 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6616 * @param pCtx Where to get the current stack mode.
6617 * @param pTmpRsp The temporary stack pointer. This is updated.
6618 * @param cbItem The size of the stack item to pop.
6619 */
6620DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6621{
6622 RTGCPTR GCPtrTop;
6623 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6624 {
6625 GCPtrTop = pTmpRsp->u;
6626 pTmpRsp->u += cbItem;
6627 }
6628 else if (pCtx->ss.Attr.n.u1DefBig)
6629 {
6630 GCPtrTop = pTmpRsp->DWords.dw0;
6631 pTmpRsp->DWords.dw0 += cbItem;
6632 }
6633 else
6634 {
6635 GCPtrTop = pTmpRsp->Words.w0;
6636 pTmpRsp->Words.w0 += cbItem;
6637 }
6638 return GCPtrTop;
6639}
6640
6641/** @} */
6642
6643
6644/** @name FPU access and helpers.
6645 *
6646 * @{
6647 */
6648
6649
6650/**
6651 * Hook for preparing to use the host FPU.
6652 *
6653 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6654 *
6655 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6656 */
6657DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6658{
6659#ifdef IN_RING3
6660 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6661#else
6662 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6663#endif
6664}
6665
6666
6667/**
6668 * Hook for preparing to use the host FPU for SSE.
6669 *
6670 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6671 *
6672 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6673 */
6674DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6675{
6676 iemFpuPrepareUsage(pVCpu);
6677}
6678
6679
6680/**
6681 * Hook for actualizing the guest FPU state before the interpreter reads it.
6682 *
6683 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6684 *
6685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6686 */
6687DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6688{
6689#ifdef IN_RING3
6690 NOREF(pVCpu);
6691#else
6692 CPUMRZFpuStateActualizeForRead(pVCpu);
6693#endif
6694}
6695
6696
6697/**
6698 * Hook for actualizing the guest FPU state before the interpreter changes it.
6699 *
6700 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6701 *
6702 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6703 */
6704DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6705{
6706#ifdef IN_RING3
6707 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6708#else
6709 CPUMRZFpuStateActualizeForChange(pVCpu);
6710#endif
6711}
6712
6713
6714/**
6715 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6716 * only.
6717 *
6718 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6719 *
6720 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6721 */
6722DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6723{
6724#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6725 NOREF(pVCpu);
6726#else
6727 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6728#endif
6729}
6730
6731
6732/**
6733 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6734 * read+write.
6735 *
6736 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6737 *
6738 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6739 */
6740DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6741{
6742#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6743 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6744#else
6745 CPUMRZFpuStateActualizeForChange(pVCpu);
6746#endif
6747}
6748
6749
6750/**
6751 * Stores a QNaN value into an FPU register.
6752 *
6753 * @param pReg Pointer to the register.
6754 */
6755DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6756{
6757 pReg->au32[0] = UINT32_C(0x00000000);
6758 pReg->au32[1] = UINT32_C(0xc0000000);
6759 pReg->au16[4] = UINT16_C(0xffff);
6760}
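
/*
 * Editorial example (hedged, not from the original source): the three stores above
 * produce the x87 "real indefinite" QNaN -- sign=1, exponent=0x7fff, mantissa
 * 0xc000000000000000, i.e. the 10-byte image FF FF C0 00 00 00 00 00 00 00 with the
 * most significant byte first.  Standalone check of that byte image, assuming a
 * little-endian host:
 */
#if 0
# include <stdint.h>
# include <string.h>
# include <assert.h>
int main(void)
{
    uint8_t        abReg[10];
    uint32_t const u32Lo = UINT32_C(0x00000000);   /* au32[0] */
    uint32_t const u32Hi = UINT32_C(0xc0000000);   /* au32[1] */
    uint16_t const u16SE = UINT16_C(0xffff);       /* au16[4]: sign + exponent */
    memcpy(&abReg[0], &u32Lo, sizeof(u32Lo));
    memcpy(&abReg[4], &u32Hi, sizeof(u32Hi));
    memcpy(&abReg[8], &u16SE, sizeof(u16SE));
    static uint8_t const abExpect[10] = { 0, 0, 0, 0,  0, 0, 0, 0xc0,  0xff, 0xff };
    assert(memcmp(abReg, abExpect, sizeof(abReg)) == 0);
    return 0;
}
#endif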
6761
6762
6763/**
6764 * Updates the FOP, FPU.CS and FPUIP registers.
6765 *
6766 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6767 * @param pCtx The CPU context.
6768 * @param pFpuCtx The FPU context.
6769 */
6770DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
6771{
6772 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
6773 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
6774 /** @todo x87.CS and FPUIP need to be kept separately. */
6775 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6776 {
6777 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
6778 * happens in real mode here based on the fnsave and fnstenv images. */
6779 pFpuCtx->CS = 0;
6780 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
6781 }
6782 else
6783 {
6784 pFpuCtx->CS = pCtx->cs.Sel;
6785 pFpuCtx->FPUIP = pCtx->rip;
6786 }
6787}
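
/*
 * Editorial example (hedged, not from the original source): in real and V8086 mode
 * the code above folds CS and EIP into a single value, FPUIP = EIP | (CS << 4).
 * Worked check (values chosen so the OR has no overlapping bits):
 */
#if 0
# include <stdint.h>
# include <assert.h>
int main(void)
{
    uint32_t const uEip = UINT32_C(0x0005);
    uint16_t const uCs  = UINT16_C(0x1234);
    assert((uEip | ((uint32_t)uCs << 4)) == UINT32_C(0x12345));
    return 0;
}
#endif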
6788
6789
6790/**
6791 * Updates the x87.DS and FPUDP registers.
6792 *
6793 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6794 * @param pCtx The CPU context.
6795 * @param pFpuCtx The FPU context.
6796 * @param iEffSeg The effective segment register.
6797 * @param GCPtrEff The effective address relative to @a iEffSeg.
6798 */
6799DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6800{
6801 RTSEL sel;
6802 switch (iEffSeg)
6803 {
6804 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
6805 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
6806 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
6807 case X86_SREG_ES: sel = pCtx->es.Sel; break;
6808 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
6809 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
6810 default:
6811 AssertMsgFailed(("%d\n", iEffSeg));
6812 sel = pCtx->ds.Sel;
6813 }
6814 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
6815 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6816 {
6817 pFpuCtx->DS = 0;
6818 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
6819 }
6820 else
6821 {
6822 pFpuCtx->DS = sel;
6823 pFpuCtx->FPUDP = GCPtrEff;
6824 }
6825}
6826
6827
6828/**
6829 * Rotates the stack registers in the push direction.
6830 *
6831 * @param pFpuCtx The FPU context.
6832 * @remarks This is a complete waste of time, but fxsave stores the registers in
6833 * stack order.
6834 */
6835DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
6836{
6837 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
6838 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
6839 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
6840 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
6841 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
6842 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
6843 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
6844 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
6845 pFpuCtx->aRegs[0].r80 = r80Tmp;
6846}
6847
6848
6849/**
6850 * Rotates the stack registers in the pop direction.
6851 *
6852 * @param pFpuCtx The FPU context.
6853 * @remarks This is a complete waste of time, but fxsave stores the registers in
6854 * stack order.
6855 */
6856DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
6857{
6858 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
6859 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
6860 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
6861 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
6862 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
6863 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
6864 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
6865 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
6866 pFpuCtx->aRegs[7].r80 = r80Tmp;
6867}
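
/*
 * Editorial example (hedged, not from the original source): aRegs[] is kept in ST()
 * order, i.e. aRegs[0] always refers to ST(0) for the current TOP, which is why a
 * push or pop has to rotate the whole array.  Standalone sketch of the push rotation
 * using a plain int array (names are made up for the example):
 */
#if 0
# include <string.h>
# include <assert.h>
static void exampleRotatePush(int *paRegs /* 8 entries, [0] is ST(0) */)
{
    int const iTmp = paRegs[7];
    memmove(&paRegs[1], &paRegs[0], 7 * sizeof(paRegs[0]));
    paRegs[0] = iTmp;
}

int main(void)
{
    int aRegs[8] = { 10, 11, 12, 13, 14, 15, 16, 17 }; /* ST(0)..ST(7) before the push. */
    exampleRotatePush(aRegs);
    /* The previously free ST(7) slot is now ST(0), ready to receive the pushed value,
       and the old ST(0) has become ST(1). */
    assert(aRegs[0] == 17 && aRegs[1] == 10 && aRegs[7] == 16);
    return 0;
}
#endif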
6868
6869
6870/**
6871 * Updates FSW and pushes an FPU result onto the FPU stack if no pending
6872 * exception prevents it.
6873 *
6874 * @param pResult The FPU operation result to push.
6875 * @param pFpuCtx The FPU context.
6876 */
6877IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
6878{
6879 /* Update FSW and bail if there are pending exceptions afterwards. */
6880 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6881 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6882 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6883 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6884 {
6885 pFpuCtx->FSW = fFsw;
6886 return;
6887 }
6888
6889 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6890 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6891 {
6892 /* All is fine, push the actual value. */
6893 pFpuCtx->FTW |= RT_BIT(iNewTop);
6894 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
6895 }
6896 else if (pFpuCtx->FCW & X86_FCW_IM)
6897 {
6898 /* Masked stack overflow, push QNaN. */
6899 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6900 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6901 }
6902 else
6903 {
6904 /* Raise stack overflow, don't push anything. */
6905 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6906 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6907 return;
6908 }
6909
6910 fFsw &= ~X86_FSW_TOP_MASK;
6911 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6912 pFpuCtx->FSW = fFsw;
6913
6914 iemFpuRotateStackPush(pFpuCtx);
6915}
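
/*
 * Editorial note (hedged, not from the original source): TOP is a 3-bit field, so
 * "TOP - 1" is computed above as (TOP + 7) & 7, and "TOP + 1" in the pop path as
 * an addition of 1 (or 9) modulo 8.  Tiny standalone check of the wrap-around:
 */
#if 0
# include <assert.h>
int main(void)
{
    assert(((0 + 7) & 7) == 7); /* Decrementing TOP=0 wraps to 7 (push onto an empty stack). */
    assert(((5 + 7) & 7) == 4); /* TOP=5 becomes 4. */
    assert(((7 + 1) & 7) == 0); /* Incrementing TOP=7 wraps back to 0 (pop). */
    return 0;
}
#endif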
6916
6917
6918/**
6919 * Stores a result in an FPU register and updates the FSW and FTW.
6920 *
6921 * @param pFpuCtx The FPU context.
6922 * @param pResult The result to store.
6923 * @param iStReg Which FPU register to store it in.
6924 */
6925IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
6926{
6927 Assert(iStReg < 8);
6928 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6929 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6930 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
6931 pFpuCtx->FTW |= RT_BIT(iReg);
6932 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
6933}
6934
6935
6936/**
6937 * Only updates the FPU status word (FSW) with the result of the current
6938 * instruction.
6939 *
6940 * @param pFpuCtx The FPU context.
6941 * @param u16FSW The FSW output of the current instruction.
6942 */
6943IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
6944{
6945 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6946 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
6947}
6948
6949
6950/**
6951 * Pops one item off the FPU stack if no pending exception prevents it.
6952 *
6953 * @param pFpuCtx The FPU context.
6954 */
6955IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
6956{
6957 /* Check pending exceptions. */
6958 uint16_t uFSW = pFpuCtx->FSW;
6959 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6960 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6961 return;
6962
6963 /* TOP++. Adding 9 is the same as adding 1 modulo 8 in the 3-bit TOP field. */
6964 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
6965 uFSW &= ~X86_FSW_TOP_MASK;
6966 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6967 pFpuCtx->FSW = uFSW;
6968
6969 /* Mark the previous ST0 as empty. */
6970 iOldTop >>= X86_FSW_TOP_SHIFT;
6971 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
6972
6973 /* Rotate the registers. */
6974 iemFpuRotateStackPop(pFpuCtx);
6975}
6976
6977
6978/**
6979 * Pushes an FPU result onto the FPU stack if no pending exception prevents it.
6980 *
6981 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6982 * @param pResult The FPU operation result to push.
6983 */
6984IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
6985{
6986 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6987 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6988 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6989 iemFpuMaybePushResult(pResult, pFpuCtx);
6990}
6991
6992
6993/**
6994 * Pushes an FPU result onto the FPU stack if no pending exception prevents it,
6995 * and sets FPUDP and FPUDS.
6996 *
6997 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6998 * @param pResult The FPU operation result to push.
6999 * @param iEffSeg The effective segment register.
7000 * @param GCPtrEff The effective address relative to @a iEffSeg.
7001 */
7002IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7003{
7004 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7005 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7006 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7007 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7008 iemFpuMaybePushResult(pResult, pFpuCtx);
7009}
7010
7011
7012/**
7013 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7014 * unless a pending exception prevents it.
7015 *
7016 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7017 * @param pResult The FPU operation result to store and push.
7018 */
7019IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7020{
7021 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7022 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7023 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7024
7025 /* Update FSW and bail if there are pending exceptions afterwards. */
7026 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7027 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7028 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7029 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7030 {
7031 pFpuCtx->FSW = fFsw;
7032 return;
7033 }
7034
7035 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7036 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7037 {
7038 /* All is fine, push the actual value. */
7039 pFpuCtx->FTW |= RT_BIT(iNewTop);
7040 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7041 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7042 }
7043 else if (pFpuCtx->FCW & X86_FCW_IM)
7044 {
7045 /* Masked stack overflow, push QNaN. */
7046 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7047 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7048 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7049 }
7050 else
7051 {
7052 /* Raise stack overflow, don't push anything. */
7053 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7054 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7055 return;
7056 }
7057
7058 fFsw &= ~X86_FSW_TOP_MASK;
7059 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7060 pFpuCtx->FSW = fFsw;
7061
7062 iemFpuRotateStackPush(pFpuCtx);
7063}
7064
7065
7066/**
7067 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7068 * FOP.
7069 *
7070 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7071 * @param pResult The result to store.
7072 * @param iStReg Which FPU register to store it in.
7073 */
7074IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7075{
7076 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7077 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7078 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7079 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7080}
7081
7082
7083/**
7084 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7085 * FOP, and then pops the stack.
7086 *
7087 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7088 * @param pResult The result to store.
7089 * @param iStReg Which FPU register to store it in.
7090 */
7091IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7092{
7093 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7094 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7095 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7096 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7097 iemFpuMaybePopOne(pFpuCtx);
7098}
7099
7100
7101/**
7102 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7103 * FPUDP, and FPUDS.
7104 *
7105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7106 * @param pResult The result to store.
7107 * @param iStReg Which FPU register to store it in.
7108 * @param iEffSeg The effective memory operand selector register.
7109 * @param GCPtrEff The effective memory operand offset.
7110 */
7111IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7112 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7113{
7114 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7115 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7116 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7117 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7118 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7119}
7120
7121
7122/**
7123 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7124 * FPUDP, and FPUDS, and then pops the stack.
7125 *
7126 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7127 * @param pResult The result to store.
7128 * @param iStReg Which FPU register to store it in.
7129 * @param iEffSeg The effective memory operand selector register.
7130 * @param GCPtrEff The effective memory operand offset.
7131 */
7132IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7133 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7134{
7135 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7136 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7137 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7138 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7139 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7140 iemFpuMaybePopOne(pFpuCtx);
7141}
7142
7143
7144/**
7145 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7146 *
7147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7148 */
7149IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7150{
7151 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7152 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7153 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7154}
7155
7156
7157/**
7158 * Marks the specified stack register as free (for FFREE).
7159 *
7160 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7161 * @param iStReg The register to free.
7162 */
7163IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7164{
7165 Assert(iStReg < 8);
7166 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7167 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7168 pFpuCtx->FTW &= ~RT_BIT(iReg);
7169}
7170
7171
7172/**
7173 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7174 *
7175 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7176 */
7177IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7178{
7179 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7180 uint16_t uFsw = pFpuCtx->FSW;
7181 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7182 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7183 uFsw &= ~X86_FSW_TOP_MASK;
7184 uFsw |= uTop;
7185 pFpuCtx->FSW = uFsw;
7186}
7187
7188
7189/**
7190 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7191 *
7192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7193 */
7194IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7195{
7196 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7197 uint16_t uFsw = pFpuCtx->FSW;
7198 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7199 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7200 uFsw &= ~X86_FSW_TOP_MASK;
7201 uFsw |= uTop;
7202 pFpuCtx->FSW = uFsw;
7203}
7204
7205
7206/**
7207 * Updates the FSW, FOP, FPUIP, and FPUCS.
7208 *
7209 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7210 * @param u16FSW The FSW from the current instruction.
7211 */
7212IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7213{
7214 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7215 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7216 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7217 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7218}
7219
7220
7221/**
7222 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7223 *
7224 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7225 * @param u16FSW The FSW from the current instruction.
7226 */
7227IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7228{
7229 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7230 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7231 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7232 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7233 iemFpuMaybePopOne(pFpuCtx);
7234}
7235
7236
7237/**
7238 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7239 *
7240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7241 * @param u16FSW The FSW from the current instruction.
7242 * @param iEffSeg The effective memory operand selector register.
7243 * @param GCPtrEff The effective memory operand offset.
7244 */
7245IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7246{
7247 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7248 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7249 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7250 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7251 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7252}
7253
7254
7255/**
7256 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7257 *
7258 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7259 * @param u16FSW The FSW from the current instruction.
7260 */
7261IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7262{
7263 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7264 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7265 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7266 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7267 iemFpuMaybePopOne(pFpuCtx);
7268 iemFpuMaybePopOne(pFpuCtx);
7269}
7270
7271
7272/**
7273 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7274 *
7275 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7276 * @param u16FSW The FSW from the current instruction.
7277 * @param iEffSeg The effective memory operand selector register.
7278 * @param GCPtrEff The effective memory operand offset.
7279 */
7280IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7281{
7282 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7283 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7284 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7285 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7286 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7287 iemFpuMaybePopOne(pFpuCtx);
7288}
7289
7290
7291/**
7292 * Worker routine for raising an FPU stack underflow exception.
7293 *
7294 * @param pFpuCtx The FPU context.
7295 * @param iStReg The stack register being accessed.
7296 */
7297IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7298{
7299 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7300 if (pFpuCtx->FCW & X86_FCW_IM)
7301 {
7302 /* Masked underflow. */
7303 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7304 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7305 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7306 if (iStReg != UINT8_MAX)
7307 {
7308 pFpuCtx->FTW |= RT_BIT(iReg);
7309 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7310 }
7311 }
7312 else
7313 {
7314 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7315 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7316 }
7317}
7318
7319
7320/**
7321 * Raises an FPU stack underflow exception.
7322 *
7323 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7324 * @param iStReg The destination register that should be loaded
7325 * with QNaN if \#IS is not masked. Specify
7326 * UINT8_MAX if none (like for fcom).
7327 */
7328DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7329{
7330 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7331 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7332 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7333 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7334}
7335
7336
7337DECL_NO_INLINE(IEM_STATIC, void)
7338iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7339{
7340 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7341 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7342 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7343 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7344 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7345}
7346
7347
7348DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7349{
7350 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7351 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7352 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7353 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7354 iemFpuMaybePopOne(pFpuCtx);
7355}
7356
7357
7358DECL_NO_INLINE(IEM_STATIC, void)
7359iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7360{
7361 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7362 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7363 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7364 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7365 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7366 iemFpuMaybePopOne(pFpuCtx);
7367}
7368
7369
7370DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7371{
7372 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7373 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7374 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7375 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7376 iemFpuMaybePopOne(pFpuCtx);
7377 iemFpuMaybePopOne(pFpuCtx);
7378}
7379
7380
7381DECL_NO_INLINE(IEM_STATIC, void)
7382iemFpuStackPushUnderflow(PVMCPU pVCpu)
7383{
7384 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7385 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7386 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7387
7388 if (pFpuCtx->FCW & X86_FCW_IM)
7389 {
7390 /* Masked underflow - push QNaN. */
7391 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7392 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7393 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7394 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7395 pFpuCtx->FTW |= RT_BIT(iNewTop);
7396 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7397 iemFpuRotateStackPush(pFpuCtx);
7398 }
7399 else
7400 {
7401 /* Exception pending - don't change TOP or the register stack. */
7402 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7403 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7404 }
7405}
7406
7407
7408DECL_NO_INLINE(IEM_STATIC, void)
7409iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7410{
7411 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7412 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7413 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7414
7415 if (pFpuCtx->FCW & X86_FCW_IM)
7416 {
7417 /* Masked underflow - push QNaN. */
7418 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7419 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7420 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7421 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7422 pFpuCtx->FTW |= RT_BIT(iNewTop);
7423 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7424 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7425 iemFpuRotateStackPush(pFpuCtx);
7426 }
7427 else
7428 {
7429 /* Exception pending - don't change TOP or the register stack. */
7430 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7431 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7432 }
7433}
7434
7435
7436/**
7437 * Worker routine for raising an FPU stack overflow exception on a push.
7438 *
7439 * @param pFpuCtx The FPU context.
7440 */
7441IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7442{
7443 if (pFpuCtx->FCW & X86_FCW_IM)
7444 {
7445 /* Masked overflow. */
7446 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7447 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7448 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7449 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7450 pFpuCtx->FTW |= RT_BIT(iNewTop);
7451 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7452 iemFpuRotateStackPush(pFpuCtx);
7453 }
7454 else
7455 {
7456 /* Exception pending - don't change TOP or the register stack. */
7457 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7458 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7459 }
7460}
7461
7462
7463/**
7464 * Raises an FPU stack overflow exception on a push.
7465 *
7466 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7467 */
7468DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7469{
7470 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7471 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7472 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7473 iemFpuStackPushOverflowOnly(pFpuCtx);
7474}
7475
7476
7477/**
7478 * Raises an FPU stack overflow exception on a push with a memory operand.
7479 *
7480 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7481 * @param iEffSeg The effective memory operand selector register.
7482 * @param GCPtrEff The effective memory operand offset.
7483 */
7484DECL_NO_INLINE(IEM_STATIC, void)
7485iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7486{
7487 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7488 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7489 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7490 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7491 iemFpuStackPushOverflowOnly(pFpuCtx);
7492}
7493
7494
7495IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7496{
7497 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7498 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7499 if (pFpuCtx->FTW & RT_BIT(iReg))
7500 return VINF_SUCCESS;
7501 return VERR_NOT_FOUND;
7502}
7503
7504
7505IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7506{
7507 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7508 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7509 if (pFpuCtx->FTW & RT_BIT(iReg))
7510 {
7511 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7512 return VINF_SUCCESS;
7513 }
7514 return VERR_NOT_FOUND;
7515}
7516
7517
7518IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7519 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7520{
7521 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7522 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7523 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7524 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7525 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7526 {
7527 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7528 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7529 return VINF_SUCCESS;
7530 }
7531 return VERR_NOT_FOUND;
7532}
7533
7534
7535IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7536{
7537 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7538 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7539 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7540 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7541 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7542 {
7543 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7544 return VINF_SUCCESS;
7545 }
7546 return VERR_NOT_FOUND;
7547}
7548
7549
7550/**
7551 * Updates the FPU exception status after FCW is changed.
7552 *
7553 * @param pFpuCtx The FPU context.
7554 */
7555IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7556{
7557 uint16_t u16Fsw = pFpuCtx->FSW;
7558 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7559 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7560 else
7561 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7562 pFpuCtx->FSW = u16Fsw;
7563}
7564
7565
7566/**
7567 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7568 *
7569 * @returns The full FTW.
7570 * @param pFpuCtx The FPU context.
7571 */
7572IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7573{
7574 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7575 uint16_t u16Ftw = 0;
7576 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7577 for (unsigned iSt = 0; iSt < 8; iSt++)
7578 {
7579 unsigned const iReg = (iSt + iTop) & 7;
7580 if (!(u8Ftw & RT_BIT(iReg)))
7581 u16Ftw |= 3 << (iReg * 2); /* empty */
7582 else
7583 {
7584 uint16_t uTag;
7585 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7586 if (pr80Reg->s.uExponent == 0x7fff)
7587 uTag = 2; /* Exponent is all 1's => Special. */
7588 else if (pr80Reg->s.uExponent == 0x0000)
7589 {
7590 if (pr80Reg->s.u64Mantissa == 0x0000)
7591 uTag = 1; /* All bits are zero => Zero. */
7592 else
7593 uTag = 2; /* Must be special. */
7594 }
7595 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7596 uTag = 0; /* Valid. */
7597 else
7598 uTag = 2; /* Must be special. */
7599
7600 u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
7601 }
7602 }
7603
7604 return u16Ftw;
7605}
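
/*
 * Editorial example (hedged, not from the original source): the 2-bit tags written
 * above follow the x87 encoding 00=valid, 01=zero, 10=special, 11=empty, one pair
 * per *physical* register (pair i describes physical register i, not ST(i)).  With
 * TOP=7 and only ST(0) holding a valid value, registers 0..6 are empty and register
 * 7 is valid, giving a full FTW of 0x3fff:
 */
#if 0
# include <assert.h>
int main(void)
{
    unsigned uFullFtw = 0;
    for (unsigned iReg = 0; iReg < 8; iReg++)
        uFullFtw |= (iReg == 7 ? 0u /* valid */ : 3u /* empty */) << (iReg * 2);
    assert(uFullFtw == 0x3fff);
    return 0;
}
#endif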
7606
7607
7608/**
7609 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7610 *
7611 * @returns The compressed FTW.
7612 * @param u16FullFtw The full FTW to convert.
7613 */
7614IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7615{
7616 uint8_t u8Ftw = 0;
7617 for (unsigned i = 0; i < 8; i++)
7618 {
7619 if ((u16FullFtw & 3) != 3 /*empty*/)
7620 u8Ftw |= RT_BIT(i);
7621 u16FullFtw >>= 2;
7622 }
7623
7624 return u8Ftw;
7625}
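
/*
 * Editorial example (hedged, not from the original source): the compressed FTW keeps
 * just one bit per physical register -- set for any non-empty tag.  Standalone copy
 * of the loop above (the example* name is made up):
 */
#if 0
# include <stdint.h>
# include <assert.h>
static uint8_t exampleCompressFtw(uint16_t u16FullFtw)
{
    uint8_t u8Ftw = 0;
    for (unsigned i = 0; i < 8; i++)
    {
        if ((u16FullFtw & 3) != 3 /* empty */)
            u8Ftw |= (uint8_t)(1u << i);
        u16FullFtw >>= 2;
    }
    return u8Ftw;
}

int main(void)
{
    assert(exampleCompressFtw(0xffff) == 0x00); /* Everything empty. */
    assert(exampleCompressFtw(0x3fff) == 0x80); /* Only physical register 7 occupied. */
    return 0;
}
#endif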
7626
7627/** @} */
7628
7629
7630/** @name Memory access.
7631 *
7632 * @{
7633 */
7634
7635
7636/**
7637 * Updates the IEMCPU::cbWritten counter if applicable.
7638 *
7639 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7640 * @param fAccess The access being accounted for.
7641 * @param cbMem The access size.
7642 */
7643DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7644{
7645 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7646 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7647 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7648}
7649
7650
7651/**
7652 * Checks if the given segment can be written to, raising the appropriate
7653 * exception if not.
7654 *
7655 * @returns VBox strict status code.
7656 *
7657 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7658 * @param pHid Pointer to the hidden register.
7659 * @param iSegReg The register number.
7660 * @param pu64BaseAddr Where to return the base address to use for the
7661 * segment. (In 64-bit code it may differ from the
7662 * base in the hidden segment.)
7663 */
7664IEM_STATIC VBOXSTRICTRC
7665iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7666{
7667 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7668 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7669 else
7670 {
7671 if (!pHid->Attr.n.u1Present)
7672 {
7673 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7674 AssertRelease(uSel == 0);
7675 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7676 return iemRaiseGeneralProtectionFault0(pVCpu);
7677 }
7678
7679 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7680 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7681 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7682 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7683 *pu64BaseAddr = pHid->u64Base;
7684 }
7685 return VINF_SUCCESS;
7686}
7687
7688
7689/**
7690 * Checks if the given segment can be read from, raising the appropriate
7691 * exception if not.
7692 *
7693 * @returns VBox strict status code.
7694 *
7695 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7696 * @param pHid Pointer to the hidden register.
7697 * @param iSegReg The register number.
7698 * @param pu64BaseAddr Where to return the base address to use for the
7699 * segment. (In 64-bit code it may differ from the
7700 * base in the hidden segment.)
7701 */
7702IEM_STATIC VBOXSTRICTRC
7703iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7704{
7705 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7706 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7707 else
7708 {
7709 if (!pHid->Attr.n.u1Present)
7710 {
7711 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7712 AssertRelease(uSel == 0);
7713 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7714 return iemRaiseGeneralProtectionFault0(pVCpu);
7715 }
7716
7717 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7718 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7719 *pu64BaseAddr = pHid->u64Base;
7720 }
7721 return VINF_SUCCESS;
7722}
7723
7724
7725/**
7726 * Applies the segment limit, base and attributes.
7727 *
7728 * This may raise a \#GP or \#SS.
7729 *
7730 * @returns VBox strict status code.
7731 *
7732 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7733 * @param fAccess The kind of access which is being performed.
7734 * @param iSegReg The index of the segment register to apply.
7735 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7736 * TSS, ++).
7737 * @param cbMem The access size.
7738 * @param pGCPtrMem Pointer to the guest memory address to apply
7739 * segmentation to. Input and output parameter.
7740 */
7741IEM_STATIC VBOXSTRICTRC
7742iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7743{
7744 if (iSegReg == UINT8_MAX)
7745 return VINF_SUCCESS;
7746
7747 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7748 switch (pVCpu->iem.s.enmCpuMode)
7749 {
7750 case IEMMODE_16BIT:
7751 case IEMMODE_32BIT:
7752 {
7753 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7754 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7755
7756 if ( pSel->Attr.n.u1Present
7757 && !pSel->Attr.n.u1Unusable)
7758 {
7759 Assert(pSel->Attr.n.u1DescType);
7760 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7761 {
7762 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7763 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7764 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7765
7766 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7767 {
7768 /** @todo CPL check. */
7769 }
7770
7771 /*
7772 * There are two kinds of data selectors, normal and expand down.
7773 */
7774 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
7775 {
7776 if ( GCPtrFirst32 > pSel->u32Limit
7777 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7778 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7779 }
7780 else
7781 {
7782 /*
7783 * The upper boundary is defined by the B bit, not the G bit!
7784 */
7785 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
7786 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
7787 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7788 }
7789 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7790 }
7791 else
7792 {
7793
7794 /*
7795 * Code selectors can usually be used to read through; writing is
7796 * only permitted in real and V8086 mode.
7797 */
7798 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7799 || ( (fAccess & IEM_ACCESS_TYPE_READ)
7800 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
7801 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
7802 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7803
7804 if ( GCPtrFirst32 > pSel->u32Limit
7805 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7806 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7807
7808 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7809 {
7810 /** @todo CPL check. */
7811 }
7812
7813 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7814 }
7815 }
7816 else
7817 return iemRaiseGeneralProtectionFault0(pVCpu);
7818 return VINF_SUCCESS;
7819 }
7820
7821 case IEMMODE_64BIT:
7822 {
7823 RTGCPTR GCPtrMem = *pGCPtrMem;
7824 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
7825 *pGCPtrMem = GCPtrMem + pSel->u64Base;
7826
7827 Assert(cbMem >= 1);
7828 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
7829 return VINF_SUCCESS;
7830 return iemRaiseGeneralProtectionFault0(pVCpu);
7831 }
7832
7833 default:
7834 AssertFailedReturn(VERR_IEM_IPE_7);
7835 }
7836}
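
/*
 * Editorial example (hedged, not from the original source): for expand-down data
 * segments the valid offsets are those strictly above the limit and not above 0xffff
 * (B=0) or 0xffffffff (B=1), which is what the check above implements.  Standalone
 * sketch of that range test (names are made up):
 */
#if 0
# include <stdint.h>
# include <stdbool.h>
# include <assert.h>
static bool exampleExpandDownOk(uint32_t offFirst, uint32_t offLast, uint32_t uLimit, bool fBig)
{
    uint32_t const uUpper = fBig ? UINT32_MAX : UINT32_C(0xffff);
    return offFirst >= uLimit + UINT32_C(1) && offLast <= uUpper;
}

int main(void)
{
    assert( exampleExpandDownOk(0x1000, 0x1001, 0x0fff, false)); /* Word just above the limit: ok. */
    assert(!exampleExpandDownOk(0x0ffe, 0x0fff, 0x0fff, false)); /* At/below the limit: bounds fault. */
    assert( exampleExpandDownOk(0x1000, 0x1001, 0x0fff, true));  /* B=1 only widens the upper bound. */
    return 0;
}
#endif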
7837
7838
7839/**
7840 * Translates a virtual address to a physical address and checks if we
7841 * can access the page as specified.
7842 *
7843 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7844 * @param GCPtrMem The virtual address.
7845 * @param fAccess The intended access.
7846 * @param pGCPhysMem Where to return the physical address.
7847 */
7848IEM_STATIC VBOXSTRICTRC
7849iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
7850{
7851 /** @todo Need a different PGM interface here. We're currently using
7852 * generic / REM interfaces. This won't cut it for R0 & RC. */
7853 RTGCPHYS GCPhys;
7854 uint64_t fFlags;
7855 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
7856 if (RT_FAILURE(rc))
7857 {
7858 /** @todo Check unassigned memory in unpaged mode. */
7859 /** @todo Reserved bits in page tables. Requires new PGM interface. */
7860 *pGCPhysMem = NIL_RTGCPHYS;
7861 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
7862 }
7863
7864 /* If the page is writable and does not have the no-exec bit set, all
7865 access is allowed. Otherwise we'll have to check more carefully... */
7866 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
7867 {
7868 /* Write to read only memory? */
7869 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7870 && !(fFlags & X86_PTE_RW)
7871 && ( (pVCpu->iem.s.uCpl == 3
7872 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7873 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
7874 {
7875 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7876 *pGCPhysMem = NIL_RTGCPHYS;
7877 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7878 }
7879
7880 /* Kernel memory accessed by userland? */
7881 if ( !(fFlags & X86_PTE_US)
7882 && pVCpu->iem.s.uCpl == 3
7883 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7884 {
7885 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7886 *pGCPhysMem = NIL_RTGCPHYS;
7887 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
7888 }
7889
7890 /* Executing non-executable memory? */
7891 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
7892 && (fFlags & X86_PTE_PAE_NX)
7893 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
7894 {
7895 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
7896 *pGCPhysMem = NIL_RTGCPHYS;
7897 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
7898 VERR_ACCESS_DENIED);
7899 }
7900 }
7901
7902 /*
7903 * Set the dirty / access flags.
7904 * ASSUMES this is set when the address is translated rather than on commit...
7905 */
7906 /** @todo testcase: check when A and D bits are actually set by the CPU. */
7907 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
7908 if ((fFlags & fAccessedDirty) != fAccessedDirty)
7909 {
7910 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
7911 AssertRC(rc2);
7912 }
7913
7914 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
7915 *pGCPhysMem = GCPhys;
7916 return VINF_SUCCESS;
7917}
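/*
 * Illustrative restatement of the access checks above (a sketch, not built):
 * returns true when the translated access must raise #PF given the PTE flags.
 * The helper name is hypothetical; it simply mirrors the three conditions
 * tested in iemMemPageTranslateAndCheckAccess.
 */
#if 0
DECLINLINE(bool) iemMemSketchAccessFaults(PVMCPU pVCpu, uint64_t fFlags, uint32_t fAccess)
{
    bool const fUserAccess = pVCpu->iem.s.uCpl == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS);

    /* Write to a read-only page: faults for CPL3 user accesses, or for any
       access when CR0.WP is set. */
    if (   (fAccess & IEM_ACCESS_TYPE_WRITE)
        && !(fFlags & X86_PTE_RW)
        && (fUserAccess || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
        return true;

    /* User-mode access to a supervisor page. */
    if (   !(fFlags & X86_PTE_US)
        && fUserAccess)
        return true;

    /* Instruction fetch from a no-execute page when EFER.NXE is enabled. */
    if (   (fAccess & IEM_ACCESS_TYPE_EXEC)
        && (fFlags & X86_PTE_PAE_NX)
        && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE))
        return true;

    return false;
}
#endif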
7918
7919
7920
7921/**
7922 * Maps a physical page.
7923 *
7924 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
7925 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7926 * @param GCPhysMem The physical address.
7927 * @param fAccess The intended access.
7928 * @param ppvMem Where to return the mapping address.
7929 * @param pLock The PGM lock.
7930 */
7931IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
7932{
7933#ifdef IEM_VERIFICATION_MODE_FULL
7934 /* Force the alternative path so we can ignore writes. */
7935 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
7936 {
7937 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7938 {
7939 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
7940 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
7941 if (RT_FAILURE(rc2))
7942 pVCpu->iem.s.fProblematicMemory = true;
7943 }
7944 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7945 }
7946#endif
7947#ifdef IEM_LOG_MEMORY_WRITES
7948 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7949 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7950#endif
7951#ifdef IEM_VERIFICATION_MODE_MINIMAL
7952 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7953#endif
7954
7955 /** @todo This API may require some improving later. A private deal with PGM
7956 * regarding locking and unlocking needs to be struck. A couple of TLBs
7957 * living in PGM, but with publicly accessible inlined access methods
7958 * could perhaps be an even better solution. */
7959 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
7960 GCPhysMem,
7961 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
7962 pVCpu->iem.s.fBypassHandlers,
7963 ppvMem,
7964 pLock);
7965 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
7966 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
7967
7968#ifdef IEM_VERIFICATION_MODE_FULL
7969 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7970 pVCpu->iem.s.fProblematicMemory = true;
7971#endif
7972 return rc;
7973}
7974
7975
7976/**
7977 * Unmap a page previously mapped by iemMemPageMap.
7978 *
7979 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7980 * @param GCPhysMem The physical address.
7981 * @param fAccess The intended access.
7982 * @param pvMem What iemMemPageMap returned.
7983 * @param pLock The PGM lock.
7984 */
7985DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
7986{
7987 NOREF(pVCpu);
7988 NOREF(GCPhysMem);
7989 NOREF(fAccess);
7990 NOREF(pvMem);
7991 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
7992}
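/*
 * Illustrative sketch (not built) of how the three helpers above combine for
 * a simple read that is known not to cross a page boundary.  The helper name
 * is hypothetical; real callers go through iemMemMap further down, which adds
 * segmentation, bounce buffering and mapping-table bookkeeping on top.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemMemSketchPhysReadU32(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t *pu32Dst)
{
    /* Virtual -> physical, raising #PF on translation or permission trouble. */
    RTGCPHYS     GCPhys;
    VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, IEM_ACCESS_DATA_R, &GCPhys);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* Map the page; real callers fall back on bounce buffering if this fails. */
    void           *pvMem;
    PGMPAGEMAPLOCK  Lock;
    int rc = iemMemPageMap(pVCpu, GCPhys, IEM_ACCESS_DATA_R, &pvMem, &Lock);
    if (RT_FAILURE(rc))
        return rc;

    *pu32Dst = *(uint32_t const *)pvMem;
    iemMemPageUnmap(pVCpu, GCPhys, IEM_ACCESS_DATA_R, pvMem, &Lock);
    return VINF_SUCCESS;
}
#endif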
7993
7994
7995/**
7996 * Looks up a memory mapping entry.
7997 *
7998 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
7999 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8000 * @param pvMem The memory address.
8001 * @param fAccess The intended access.
8002 */
8003DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8004{
8005 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8006 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8007 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8008 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8009 return 0;
8010 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8011 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8012 return 1;
8013 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8014 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8015 return 2;
8016 return VERR_NOT_FOUND;
8017}
8018
8019
8020/**
8021 * Finds a free memmap entry when using iNextMapping doesn't work.
8022 *
8023 * @returns Memory mapping index, 1024 on failure.
8024 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8025 */
8026IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8027{
8028 /*
8029 * The easy case.
8030 */
8031 if (pVCpu->iem.s.cActiveMappings == 0)
8032 {
8033 pVCpu->iem.s.iNextMapping = 1;
8034 return 0;
8035 }
8036
8037 /* There should be enough mappings for all instructions. */
8038 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8039
8040 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8041 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8042 return i;
8043
8044 AssertFailedReturn(1024);
8045}
8046
8047
8048/**
8049 * Commits a bounce buffer that needs writing back and unmaps it.
8050 *
8051 * @returns Strict VBox status code.
8052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8053 * @param iMemMap The index of the buffer to commit.
8054 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8055 * Always false in ring-3, obviously.
8056 */
8057IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8058{
8059 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8060 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8061#ifdef IN_RING3
8062 Assert(!fPostponeFail);
8063 RT_NOREF_PV(fPostponeFail);
8064#endif
8065
8066 /*
8067 * Do the writing.
8068 */
8069#ifndef IEM_VERIFICATION_MODE_MINIMAL
8070 PVM pVM = pVCpu->CTX_SUFF(pVM);
8071 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
8072 && !IEM_VERIFICATION_ENABLED(pVCpu))
8073 {
8074 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8075 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8076 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8077 if (!pVCpu->iem.s.fBypassHandlers)
8078 {
8079 /*
8080 * Carefully and efficiently dealing with access handler return
8081 * codes makes this a little bloated.
8082 */
8083 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8084 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8085 pbBuf,
8086 cbFirst,
8087 PGMACCESSORIGIN_IEM);
8088 if (rcStrict == VINF_SUCCESS)
8089 {
8090 if (cbSecond)
8091 {
8092 rcStrict = PGMPhysWrite(pVM,
8093 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8094 pbBuf + cbFirst,
8095 cbSecond,
8096 PGMACCESSORIGIN_IEM);
8097 if (rcStrict == VINF_SUCCESS)
8098 { /* nothing */ }
8099 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8100 {
8101 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8102 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8103 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8104 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8105 }
8106# ifndef IN_RING3
8107 else if (fPostponeFail)
8108 {
8109 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8110 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8111 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8112 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8113 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8114 return iemSetPassUpStatus(pVCpu, rcStrict);
8115 }
8116# endif
8117 else
8118 {
8119 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8120 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8121 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8122 return rcStrict;
8123 }
8124 }
8125 }
8126 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8127 {
8128 if (!cbSecond)
8129 {
8130 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8131 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8132 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8133 }
8134 else
8135 {
8136 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8137 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8138 pbBuf + cbFirst,
8139 cbSecond,
8140 PGMACCESSORIGIN_IEM);
8141 if (rcStrict2 == VINF_SUCCESS)
8142 {
8143 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8144 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8145 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8146 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8147 }
8148 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8149 {
8150 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8151 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8152 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8153 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8154 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8155 }
8156# ifndef IN_RING3
8157 else if (fPostponeFail)
8158 {
8159 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8160 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8161 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8162 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8163 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8164 return iemSetPassUpStatus(pVCpu, rcStrict);
8165 }
8166# endif
8167 else
8168 {
8169 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8170 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8171 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8172 return rcStrict2;
8173 }
8174 }
8175 }
8176# ifndef IN_RING3
8177 else if (fPostponeFail)
8178 {
8179 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8180 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8181 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8182 if (!cbSecond)
8183 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8184 else
8185 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8186 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8187 return iemSetPassUpStatus(pVCpu, rcStrict);
8188 }
8189# endif
8190 else
8191 {
8192 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8193 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8194 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8195 return rcStrict;
8196 }
8197 }
8198 else
8199 {
8200 /*
8201 * No access handlers, much simpler.
8202 */
8203 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8204 if (RT_SUCCESS(rc))
8205 {
8206 if (cbSecond)
8207 {
8208 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8209 if (RT_SUCCESS(rc))
8210 { /* likely */ }
8211 else
8212 {
8213 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8214 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8215 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8216 return rc;
8217 }
8218 }
8219 }
8220 else
8221 {
8222 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8223 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8224 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8225 return rc;
8226 }
8227 }
8228 }
8229#endif
8230
8231#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8232 /*
8233 * Record the write(s).
8234 */
8235 if (!pVCpu->iem.s.fNoRem)
8236 {
8237 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8238 if (pEvtRec)
8239 {
8240 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8241 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
8242 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8243 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
8244 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
8245 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8246 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8247 }
8248 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8249 {
8250 pEvtRec = iemVerifyAllocRecord(pVCpu);
8251 if (pEvtRec)
8252 {
8253 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8254 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
8255 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8256 memcpy(pEvtRec->u.RamWrite.ab,
8257 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
8258 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
8259 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8260 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8261 }
8262 }
8263 }
8264#endif
8265#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
8266 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8267 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8268 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8269 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8270 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8271 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8272
8273 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8274 g_cbIemWrote = cbWrote;
8275 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8276#endif
8277
8278 /*
8279 * Free the mapping entry.
8280 */
8281 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8282 Assert(pVCpu->iem.s.cActiveMappings != 0);
8283 pVCpu->iem.s.cActiveMappings--;
8284 return VINF_SUCCESS;
8285}
8286
8287
8288/**
8289 * iemMemMap worker that deals with a request crossing pages.
8290 */
8291IEM_STATIC VBOXSTRICTRC
8292iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8293{
8294 /*
8295 * Do the address translations.
8296 */
8297 RTGCPHYS GCPhysFirst;
8298 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8299 if (rcStrict != VINF_SUCCESS)
8300 return rcStrict;
8301
8302 RTGCPHYS GCPhysSecond;
8303 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8304 fAccess, &GCPhysSecond);
8305 if (rcStrict != VINF_SUCCESS)
8306 return rcStrict;
8307 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8308
8309 PVM pVM = pVCpu->CTX_SUFF(pVM);
8310#ifdef IEM_VERIFICATION_MODE_FULL
8311 /*
8312 * Detect problematic memory when verifying so we can select
8313 * the right execution engine. (TLB: Redo this.)
8314 */
8315 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8316 {
8317 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8318 if (RT_SUCCESS(rc2))
8319 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8320 if (RT_FAILURE(rc2))
8321 pVCpu->iem.s.fProblematicMemory = true;
8322 }
8323#endif
8324
8325
8326 /*
8327 * Read in the current memory content if it's a read, execute or partial
8328 * write access.
8329 */
8330 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8331 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8332 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8333
8334 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8335 {
8336 if (!pVCpu->iem.s.fBypassHandlers)
8337 {
8338 /*
8339 * Must carefully deal with access handler status codes here,
8340 * which makes the code a bit bloated.
8341 */
8342 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8343 if (rcStrict == VINF_SUCCESS)
8344 {
8345 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8346 if (rcStrict == VINF_SUCCESS)
8347 { /*likely */ }
8348 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8349 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8350 else
8351 {
8352 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8353 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8354 return rcStrict;
8355 }
8356 }
8357 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8358 {
8359 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8360 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8361 {
8362 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8363 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8364 }
8365 else
8366 {
8367 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8368 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8369 return rcStrict2;
8370 }
8371 }
8372 else
8373 {
8374 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8375 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8376 return rcStrict;
8377 }
8378 }
8379 else
8380 {
8381 /*
8382 * No informational status codes here, much more straightforward.
8383 */
8384 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8385 if (RT_SUCCESS(rc))
8386 {
8387 Assert(rc == VINF_SUCCESS);
8388 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8389 if (RT_SUCCESS(rc))
8390 Assert(rc == VINF_SUCCESS);
8391 else
8392 {
8393 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8394 return rc;
8395 }
8396 }
8397 else
8398 {
8399 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8400 return rc;
8401 }
8402 }
8403
8404#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8405 if ( !pVCpu->iem.s.fNoRem
8406 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8407 {
8408 /*
8409 * Record the reads.
8410 */
8411 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8412 if (pEvtRec)
8413 {
8414 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8415 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8416 pEvtRec->u.RamRead.cb = cbFirstPage;
8417 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8418 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8419 }
8420 pEvtRec = iemVerifyAllocRecord(pVCpu);
8421 if (pEvtRec)
8422 {
8423 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8424 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8425 pEvtRec->u.RamRead.cb = cbSecondPage;
8426 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8427 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8428 }
8429 }
8430#endif
8431 }
8432#ifdef VBOX_STRICT
8433 else
8434 memset(pbBuf, 0xcc, cbMem);
8435 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8436 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8437#endif
8438
8439 /*
8440 * Commit the bounce buffer entry.
8441 */
8442 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8443 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8444 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8445 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8446 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8447 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8448 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8449 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8450 pVCpu->iem.s.cActiveMappings++;
8451
8452 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8453 *ppvMem = pbBuf;
8454 return VINF_SUCCESS;
8455}
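/*
 * Worked example for the split above (illustrative): a 4 byte access at a
 * guest linear address ending in 0xffe, with 4 KB pages, gives
 *
 *      cbFirstPage  = PAGE_SIZE - (GCPtrFirst & PAGE_OFFSET_MASK) = 0x1000 - 0xffe = 2
 *      cbSecondPage = cbMem - cbFirstPage                         = 4 - 2          = 2
 *
 * The bounce buffer then holds both halves back to back: bytes [0..1] are
 * committed to GCPhysFirst (the last two bytes of the first page) and bytes
 * [2..3] to GCPhysSecond (offset 0 of the separately translated second page).
 */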
8456
8457
8458/**
8459 * iemMemMap worker that deals with iemMemPageMap failures.
8460 */
8461IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8462 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8463{
8464 /*
8465 * Filter out conditions we can handle and the ones which shouldn't happen.
8466 */
8467 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8468 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8469 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8470 {
8471 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8472 return rcMap;
8473 }
8474 pVCpu->iem.s.cPotentialExits++;
8475
8476 /*
8477 * Read in the current memory content if it's a read, execute or partial
8478 * write access.
8479 */
8480 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8481 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8482 {
8483 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8484 memset(pbBuf, 0xff, cbMem);
8485 else
8486 {
8487 int rc;
8488 if (!pVCpu->iem.s.fBypassHandlers)
8489 {
8490 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8491 if (rcStrict == VINF_SUCCESS)
8492 { /* nothing */ }
8493 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8494 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8495 else
8496 {
8497 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8498 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8499 return rcStrict;
8500 }
8501 }
8502 else
8503 {
8504 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8505 if (RT_SUCCESS(rc))
8506 { /* likely */ }
8507 else
8508 {
8509 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
8510 GCPhysFirst, rc));
8511 return rc;
8512 }
8513 }
8514 }
8515
8516#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8517 if ( !pVCpu->iem.s.fNoRem
8518 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8519 {
8520 /*
8521 * Record the read.
8522 */
8523 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8524 if (pEvtRec)
8525 {
8526 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8527 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8528 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8529 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8530 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8531 }
8532 }
8533#endif
8534 }
8535#ifdef VBOX_STRICT
8536 else
8537 memset(pbBuf, 0xcc, cbMem);
8540 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8541 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8542#endif
8543
8544 /*
8545 * Commit the bounce buffer entry.
8546 */
8547 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8548 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8549 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8550 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8551 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8552 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8553 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8554 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8555 pVCpu->iem.s.cActiveMappings++;
8556
8557 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8558 *ppvMem = pbBuf;
8559 return VINF_SUCCESS;
8560}
8561
8562
8563
8564/**
8565 * Maps the specified guest memory for the given kind of access.
8566 *
8567 * This may use bounce buffering of the memory if it's crossing a page
8568 * boundary or if there is an access handler installed for any of it. Because
8569 * of lock prefix guarantees, we're in for some extra clutter when this
8570 * happens.
8571 *
8572 * This may raise a \#GP, \#SS, \#PF or \#AC.
8573 *
8574 * @returns VBox strict status code.
8575 *
8576 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8577 * @param ppvMem Where to return the pointer to the mapped
8578 * memory.
8579 * @param cbMem The number of bytes to map. This is usually 1,
8580 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8581 * string operations it can be up to a page.
8582 * @param iSegReg The index of the segment register to use for
8583 * this access. The base and limits are checked.
8584 * Use UINT8_MAX to indicate that no segmentation
8585 * is required (for IDT, GDT and LDT accesses).
8586 * @param GCPtrMem The address of the guest memory.
8587 * @param fAccess How the memory is being accessed. The
8588 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8589 * how to map the memory, while the
8590 * IEM_ACCESS_WHAT_XXX bit is used when raising
8591 * exceptions.
8592 */
8593IEM_STATIC VBOXSTRICTRC
8594iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8595{
8596 /*
8597 * Check the input and figure out which mapping entry to use.
8598 */
8599 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8600 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8601 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8602
8603 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8604 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8605 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8606 {
8607 iMemMap = iemMemMapFindFree(pVCpu);
8608 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8609 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8610 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8611 pVCpu->iem.s.aMemMappings[2].fAccess),
8612 VERR_IEM_IPE_9);
8613 }
8614
8615 /*
8616 * Map the memory, checking that we can actually access it. If something
8617 * slightly complicated happens, fall back on bounce buffering.
8618 */
8619 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8620 if (rcStrict != VINF_SUCCESS)
8621 return rcStrict;
8622
8623 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8624 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8625
8626 RTGCPHYS GCPhysFirst;
8627 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8628 if (rcStrict != VINF_SUCCESS)
8629 return rcStrict;
8630
8631 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8632 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8633 if (fAccess & IEM_ACCESS_TYPE_READ)
8634 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8635
8636 void *pvMem;
8637 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8638 if (rcStrict != VINF_SUCCESS)
8639 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8640
8641 /*
8642 * Fill in the mapping table entry.
8643 */
8644 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8645 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8646 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8647 pVCpu->iem.s.cActiveMappings++;
8648
8649 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8650 *ppvMem = pvMem;
8651 return VINF_SUCCESS;
8652}
8653
8654
8655/**
8656 * Commits the guest memory if bounce buffered and unmaps it.
8657 *
8658 * @returns Strict VBox status code.
8659 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8660 * @param pvMem The mapping.
8661 * @param fAccess The kind of access.
8662 */
8663IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8664{
8665 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8666 AssertReturn(iMemMap >= 0, iMemMap);
8667
8668 /* If it's bounce buffered, we may need to write back the buffer. */
8669 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8670 {
8671 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8672 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8673 }
8674 /* Otherwise unlock it. */
8675 else
8676 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8677
8678 /* Free the entry. */
8679 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8680 Assert(pVCpu->iem.s.cActiveMappings != 0);
8681 pVCpu->iem.s.cActiveMappings--;
8682 return VINF_SUCCESS;
8683}
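/*
 * Illustrative sketch (not built): the canonical write pattern pairing
 * iemMemMap with iemMemCommitAndUnmap.  The helper name is hypothetical;
 * the shape mirrors the data fetch helpers below with the access direction
 * reversed (IEM_ACCESS_DATA_W instead of IEM_ACCESS_DATA_R).
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemMemSketchStoreU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
{
    uint32_t *pu32Dst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    if (rc == VINF_SUCCESS)
    {
        *pu32Dst = u32Value;
        /* The write only reaches guest memory here - either directly, or via
           the bounce buffer commit for cross-page / handler-backed addresses. */
        rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
    }
    return rc;
}
#endif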
8684
8685#ifdef IEM_WITH_SETJMP
8686
8687/**
8688 * Maps the specified guest memory for the given kind of access, longjmp on
8689 * error.
8690 *
8691 * This may use bounce buffering of the memory if it's crossing a page
8692 * boundary or if there is an access handler installed for any of it. Because
8693 * of lock prefix guarantees, we're in for some extra clutter when this
8694 * happens.
8695 *
8696 * This may raise a \#GP, \#SS, \#PF or \#AC.
8697 *
8698 * @returns Pointer to the mapped memory.
8699 *
8700 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8701 * @param cbMem The number of bytes to map. This is usually 1,
8702 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8703 * string operations it can be up to a page.
8704 * @param iSegReg The index of the segment register to use for
8705 * this access. The base and limits are checked.
8706 * Use UINT8_MAX to indicate that no segmentation
8707 * is required (for IDT, GDT and LDT accesses).
8708 * @param GCPtrMem The address of the guest memory.
8709 * @param fAccess How the memory is being accessed. The
8710 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8711 * how to map the memory, while the
8712 * IEM_ACCESS_WHAT_XXX bit is used when raising
8713 * exceptions.
8714 */
8715IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8716{
8717 /*
8718 * Check the input and figure out which mapping entry to use.
8719 */
8720 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8721 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8722 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8723
8724 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8725 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8726 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8727 {
8728 iMemMap = iemMemMapFindFree(pVCpu);
8729 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8730 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8731 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8732 pVCpu->iem.s.aMemMappings[2].fAccess),
8733 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8734 }
8735
8736 /*
8737 * Map the memory, checking that we can actually access it. If something
8738 * slightly complicated happens, fall back on bounce buffering.
8739 */
8740 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8741 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8742 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8743
8744 /* Crossing a page boundary? */
8745 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8746 { /* No (likely). */ }
8747 else
8748 {
8749 void *pvMem;
8750 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8751 if (rcStrict == VINF_SUCCESS)
8752 return pvMem;
8753 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8754 }
8755
8756 RTGCPHYS GCPhysFirst;
8757 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8758 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8759 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8760
8761 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8762 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8763 if (fAccess & IEM_ACCESS_TYPE_READ)
8764 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8765
8766 void *pvMem;
8767 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8768 if (rcStrict == VINF_SUCCESS)
8769 { /* likely */ }
8770 else
8771 {
8772 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8773 if (rcStrict == VINF_SUCCESS)
8774 return pvMem;
8775 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8776 }
8777
8778 /*
8779 * Fill in the mapping table entry.
8780 */
8781 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8782 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8783 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8784 pVCpu->iem.s.cActiveMappings++;
8785
8786 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8787 return pvMem;
8788}
8789
8790
8791/**
8792 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8793 *
8794 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8795 * @param pvMem The mapping.
8796 * @param fAccess The kind of access.
8797 */
8798IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8799{
8800 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8801 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8802
8803 /* If it's bounce buffered, we may need to write back the buffer. */
8804 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8805 {
8806 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8807 {
8808 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8809 if (rcStrict == VINF_SUCCESS)
8810 return;
8811 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8812 }
8813 }
8814 /* Otherwise unlock it. */
8815 else
8816 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8817
8818 /* Free the entry. */
8819 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8820 Assert(pVCpu->iem.s.cActiveMappings != 0);
8821 pVCpu->iem.s.cActiveMappings--;
8822}
8823
8824#endif
8825
8826#ifndef IN_RING3
8827/**
8828 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
8829 * buffer part shows trouble, the write will be postponed to ring-3 (sets FF and stuff).
8830 *
8831 * Allows the instruction to be completed and retired, while the IEM user will
8832 * return to ring-3 immediately afterwards and do the postponed writes there.
8833 *
8834 * @returns VBox status code (no strict statuses). Caller must check
8835 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
8836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8837 * @param pvMem The mapping.
8838 * @param fAccess The kind of access.
8839 */
8840IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8841{
8842 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8843 AssertReturn(iMemMap >= 0, iMemMap);
8844
8845 /* If it's bounce buffered, we may need to write back the buffer. */
8846 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8847 {
8848 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8849 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
8850 }
8851 /* Otherwise unlock it. */
8852 else
8853 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8854
8855 /* Free the entry. */
8856 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8857 Assert(pVCpu->iem.s.cActiveMappings != 0);
8858 pVCpu->iem.s.cActiveMappings--;
8859 return VINF_SUCCESS;
8860}
8861#endif
8862
8863
8864/**
8865 * Rolls back mappings, releasing page locks and such.
8866 *
8867 * The caller shall only call this after checking cActiveMappings.
8868 *
8870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8871 */
8872IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
8873{
8874 Assert(pVCpu->iem.s.cActiveMappings > 0);
8875
8876 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
8877 while (iMemMap-- > 0)
8878 {
8879 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
8880 if (fAccess != IEM_ACCESS_INVALID)
8881 {
8882 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
8883 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8884 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
8885 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8886 Assert(pVCpu->iem.s.cActiveMappings > 0);
8887 pVCpu->iem.s.cActiveMappings--;
8888 }
8889 }
8890}
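/*
 * Illustrative caller pattern (hypothetical, not built): rollback is only
 * performed when an instruction bails out while mappings are still active,
 * per the cActiveMappings contract stated in the doc comment above.
 */
#if 0
DECLINLINE(VBOXSTRICTRC) iemMemSketchBailOut(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    /* Release any page locks / bounce buffers left behind by the aborted instruction. */
    if (pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);
    return rcStrict;
}
#endif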
8891
8892
8893/**
8894 * Fetches a data byte.
8895 *
8896 * @returns Strict VBox status code.
8897 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8898 * @param pu8Dst Where to return the byte.
8899 * @param iSegReg The index of the segment register to use for
8900 * this access. The base and limits are checked.
8901 * @param GCPtrMem The address of the guest memory.
8902 */
8903IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8904{
8905 /* The lazy approach for now... */
8906 uint8_t const *pu8Src;
8907 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8908 if (rc == VINF_SUCCESS)
8909 {
8910 *pu8Dst = *pu8Src;
8911 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8912 }
8913 return rc;
8914}
8915
8916
8917#ifdef IEM_WITH_SETJMP
8918/**
8919 * Fetches a data byte, longjmp on error.
8920 *
8921 * @returns The byte.
8922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8923 * @param iSegReg The index of the segment register to use for
8924 * this access. The base and limits are checked.
8925 * @param GCPtrMem The address of the guest memory.
8926 */
8927DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8928{
8929 /* The lazy approach for now... */
8930 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8931 uint8_t const bRet = *pu8Src;
8932 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8933 return bRet;
8934}
8935#endif /* IEM_WITH_SETJMP */
8936
8937
8938/**
8939 * Fetches a data word.
8940 *
8941 * @returns Strict VBox status code.
8942 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8943 * @param pu16Dst Where to return the word.
8944 * @param iSegReg The index of the segment register to use for
8945 * this access. The base and limits are checked.
8946 * @param GCPtrMem The address of the guest memory.
8947 */
8948IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8949{
8950 /* The lazy approach for now... */
8951 uint16_t const *pu16Src;
8952 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8953 if (rc == VINF_SUCCESS)
8954 {
8955 *pu16Dst = *pu16Src;
8956 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8957 }
8958 return rc;
8959}
8960
8961
8962#ifdef IEM_WITH_SETJMP
8963/**
8964 * Fetches a data word, longjmp on error.
8965 *
8966 * @returns The word
8967 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8968 * @param iSegReg The index of the segment register to use for
8969 * this access. The base and limits are checked.
8970 * @param GCPtrMem The address of the guest memory.
8971 */
8972DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8973{
8974 /* The lazy approach for now... */
8975 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8976 uint16_t const u16Ret = *pu16Src;
8977 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8978 return u16Ret;
8979}
8980#endif
8981
8982
8983/**
8984 * Fetches a data dword.
8985 *
8986 * @returns Strict VBox status code.
8987 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8988 * @param pu32Dst Where to return the dword.
8989 * @param iSegReg The index of the segment register to use for
8990 * this access. The base and limits are checked.
8991 * @param GCPtrMem The address of the guest memory.
8992 */
8993IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8994{
8995 /* The lazy approach for now... */
8996 uint32_t const *pu32Src;
8997 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8998 if (rc == VINF_SUCCESS)
8999 {
9000 *pu32Dst = *pu32Src;
9001 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9002 }
9003 return rc;
9004}
9005
9006
9007#ifdef IEM_WITH_SETJMP
9008
9009IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9010{
9011 Assert(cbMem >= 1);
9012 Assert(iSegReg < X86_SREG_COUNT);
9013
9014 /*
9015 * 64-bit mode is simpler.
9016 */
9017 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9018 {
9019 if (iSegReg >= X86_SREG_FS)
9020 {
9021 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9022 GCPtrMem += pSel->u64Base;
9023 }
9024
9025 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9026 return GCPtrMem;
9027 }
9028 /*
9029 * 16-bit and 32-bit segmentation.
9030 */
9031 else
9032 {
9033 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9034 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9035 == X86DESCATTR_P /* data, expand up */
9036 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9037 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9038 {
9039 /* expand up */
9040 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9041 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9042 && GCPtrLast32 > (uint32_t)GCPtrMem))
9043 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9044 }
9045 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9046 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9047 {
9048 /* expand down */
9049 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9050 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9051 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9052 && GCPtrLast32 > (uint32_t)GCPtrMem))
9053 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9054 }
9055 else
9056 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9057 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9058 }
9059 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9060}
9061
9062
9063IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9064{
9065 Assert(cbMem >= 1);
9066 Assert(iSegReg < X86_SREG_COUNT);
9067
9068 /*
9069 * 64-bit mode is simpler.
9070 */
9071 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9072 {
9073 if (iSegReg >= X86_SREG_FS)
9074 {
9075 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9076 GCPtrMem += pSel->u64Base;
9077 }
9078
9079 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9080 return GCPtrMem;
9081 }
9082 /*
9083 * 16-bit and 32-bit segmentation.
9084 */
9085 else
9086 {
9087 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9088 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9089 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9090 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9091 {
9092 /* expand up */
9093 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9094 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9095 && GCPtrLast32 > (uint32_t)GCPtrMem))
9096 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9097 }
9098 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9099 {
9100 /* expand down */
9101 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9102 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9103 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9104 && GCPtrLast32 > (uint32_t)GCPtrMem))
9105 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9106 }
9107 else
9108 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9109 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9110 }
9111 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9112}
9113
9114
9115/**
9116 * Fetches a data dword, longjmp on error, fallback/safe version.
9117 *
9118 * @returns The dword
9119 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9120 * @param iSegReg The index of the segment register to use for
9121 * this access. The base and limits are checked.
9122 * @param GCPtrMem The address of the guest memory.
9123 */
9124IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9125{
9126 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9127 uint32_t const u32Ret = *pu32Src;
9128 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9129 return u32Ret;
9130}
9131
9132
9133/**
9134 * Fetches a data dword, longjmp on error.
9135 *
9136 * @returns The dword
9137 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9138 * @param iSegReg The index of the segment register to use for
9139 * this access. The base and limits are checked.
9140 * @param GCPtrMem The address of the guest memory.
9141 */
9142DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9143{
9144# ifdef IEM_WITH_DATA_TLB
9145 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9146 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9147 {
9148 /// @todo more later.
9149 }
9150
9151 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9152# else
9153 /* The lazy approach. */
9154 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9155 uint32_t const u32Ret = *pu32Src;
9156 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9157 return u32Ret;
9158# endif
9159}
9160#endif
9161
9162
9163#ifdef SOME_UNUSED_FUNCTION
9164/**
9165 * Fetches a data dword and sign extends it to a qword.
9166 *
9167 * @returns Strict VBox status code.
9168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9169 * @param pu64Dst Where to return the sign extended value.
9170 * @param iSegReg The index of the segment register to use for
9171 * this access. The base and limits are checked.
9172 * @param GCPtrMem The address of the guest memory.
9173 */
9174IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9175{
9176 /* The lazy approach for now... */
9177 int32_t const *pi32Src;
9178 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9179 if (rc == VINF_SUCCESS)
9180 {
9181 *pu64Dst = *pi32Src;
9182 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9183 }
9184#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9185 else
9186 *pu64Dst = 0;
9187#endif
9188 return rc;
9189}
9190#endif
9191
9192
9193/**
9194 * Fetches a data qword.
9195 *
9196 * @returns Strict VBox status code.
9197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9198 * @param pu64Dst Where to return the qword.
9199 * @param iSegReg The index of the segment register to use for
9200 * this access. The base and limits are checked.
9201 * @param GCPtrMem The address of the guest memory.
9202 */
9203IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9204{
9205 /* The lazy approach for now... */
9206 uint64_t const *pu64Src;
9207 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9208 if (rc == VINF_SUCCESS)
9209 {
9210 *pu64Dst = *pu64Src;
9211 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9212 }
9213 return rc;
9214}
9215
9216
9217#ifdef IEM_WITH_SETJMP
9218/**
9219 * Fetches a data qword, longjmp on error.
9220 *
9221 * @returns The qword.
9222 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9223 * @param iSegReg The index of the segment register to use for
9224 * this access. The base and limits are checked.
9225 * @param GCPtrMem The address of the guest memory.
9226 */
9227DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9228{
9229 /* The lazy approach for now... */
9230 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9231 uint64_t const u64Ret = *pu64Src;
9232 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9233 return u64Ret;
9234}
9235#endif
9236
9237
9238/**
9239 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9240 *
9241 * @returns Strict VBox status code.
9242 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9243 * @param pu64Dst Where to return the qword.
9244 * @param iSegReg The index of the segment register to use for
9245 * this access. The base and limits are checked.
9246 * @param GCPtrMem The address of the guest memory.
9247 */
9248IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9249{
9250 /* The lazy approach for now... */
9251 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9252 if (RT_UNLIKELY(GCPtrMem & 15))
9253 return iemRaiseGeneralProtectionFault0(pVCpu);
9254
9255 uint64_t const *pu64Src;
9256 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9257 if (rc == VINF_SUCCESS)
9258 {
9259 *pu64Dst = *pu64Src;
9260 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9261 }
9262 return rc;
9263}
9264
9265
9266#ifdef IEM_WITH_SETJMP
9267/**
9268 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9269 *
9270 * @returns The qword.
9271 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9272 * @param iSegReg The index of the segment register to use for
9273 * this access. The base and limits are checked.
9274 * @param GCPtrMem The address of the guest memory.
9275 */
9276DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9277{
9278 /* The lazy approach for now... */
9279 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9280 if (RT_LIKELY(!(GCPtrMem & 15)))
9281 {
9282 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9283 uint64_t const u64Ret = *pu64Src;
9284 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9285 return u64Ret;
9286 }
9287
9288 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9289 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9290}
9291#endif
9292
9293
9294/**
9295 * Fetches a data tword.
9296 *
9297 * @returns Strict VBox status code.
9298 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9299 * @param pr80Dst Where to return the tword.
9300 * @param iSegReg The index of the segment register to use for
9301 * this access. The base and limits are checked.
9302 * @param GCPtrMem The address of the guest memory.
9303 */
9304IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9305{
9306 /* The lazy approach for now... */
9307 PCRTFLOAT80U pr80Src;
9308 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9309 if (rc == VINF_SUCCESS)
9310 {
9311 *pr80Dst = *pr80Src;
9312 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9313 }
9314 return rc;
9315}
9316
9317
9318#ifdef IEM_WITH_SETJMP
9319/**
9320 * Fetches a data tword, longjmp on error.
9321 *
9322 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9323 * @param pr80Dst Where to return the tword.
9324 * @param iSegReg The index of the segment register to use for
9325 * this access. The base and limits are checked.
9326 * @param GCPtrMem The address of the guest memory.
9327 */
9328DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9329{
9330 /* The lazy approach for now... */
9331 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9332 *pr80Dst = *pr80Src;
9333 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9334}
9335#endif
9336
9337
9338/**
9339 * Fetches a data dqword (double qword), generally SSE related.
9340 *
9341 * @returns Strict VBox status code.
9342 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9343 * @param pu128Dst Where to return the dqword.
9344 * @param iSegReg The index of the segment register to use for
9345 * this access. The base and limits are checked.
9346 * @param GCPtrMem The address of the guest memory.
9347 */
9348IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9349{
9350 /* The lazy approach for now... */
9351 PCRTUINT128U pu128Src;
9352 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9353 if (rc == VINF_SUCCESS)
9354 {
9355 pu128Dst->au64[0] = pu128Src->au64[0];
9356 pu128Dst->au64[1] = pu128Src->au64[1];
9357 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9358 }
9359 return rc;
9360}
9361
9362
9363#ifdef IEM_WITH_SETJMP
9364/**
9365 * Fetches a data dqword (double qword), generally SSE related.
9366 *
9367 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9368 * @param pu128Dst Where to return the dqword.
9369 * @param iSegReg The index of the segment register to use for
9370 * this access. The base and limits are checked.
9371 * @param GCPtrMem The address of the guest memory.
9372 */
9373IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9374{
9375 /* The lazy approach for now... */
9376 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9377 pu128Dst->au64[0] = pu128Src->au64[0];
9378 pu128Dst->au64[1] = pu128Src->au64[1];
9379 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9380}
9381#endif
9382
9383
9384/**
9385 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9386 * related.
9387 *
9388 * Raises \#GP(0) if not aligned.
9389 *
9390 * @returns Strict VBox status code.
9391 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9392 * @param pu128Dst Where to return the dqword.
9393 * @param iSegReg The index of the segment register to use for
9394 * this access. The base and limits are checked.
9395 * @param GCPtrMem The address of the guest memory.
9396 */
9397IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9398{
9399 /* The lazy approach for now... */
9400 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9401 if ( (GCPtrMem & 15)
9402 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9403 return iemRaiseGeneralProtectionFault0(pVCpu);
9404
9405 PCRTUINT128U pu128Src;
9406 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9407 if (rc == VINF_SUCCESS)
9408 {
9409 pu128Dst->au64[0] = pu128Src->au64[0];
9410 pu128Dst->au64[1] = pu128Src->au64[1];
9411 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9412 }
9413 return rc;
9414}
9415
9416
9417#ifdef IEM_WITH_SETJMP
9418/**
9419 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9420 * related, longjmp on error.
9421 *
9422 * Raises \#GP(0) if not aligned.
9423 *
9424 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9425 * @param pu128Dst Where to return the dqword.
9426 * @param iSegReg The index of the segment register to use for
9427 * this access. The base and limits are checked.
9428 * @param GCPtrMem The address of the guest memory.
9429 */
9430DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9431{
9432 /* The lazy approach for now... */
9433 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9434 if ( (GCPtrMem & 15) == 0
9435 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9436 {
9437 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9438 pu128Dst->au64[0] = pu128Src->au64[0];
9439 pu128Dst->au64[1] = pu128Src->au64[1];
9440 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9441 return;
9442 }
9443
9444 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9445 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9446}
9447#endif
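/*
 * Usage sketch (illustrative only, not part of the build): how an instruction
 * body could consume iemMemFetchDataU128AlignedSse above.  The wrapper name
 * and the hard-coded DS segment are assumptions made for this example; real
 * callers pass the effective segment from the decoded instruction.
 */
#if 0
static VBOXSTRICTRC iemExampleFetchAlignedXmm(PVMCPU pVCpu, RTGCPTR GCPtrMem)
{
    RTUINT128U uSrc;
    /* Misalignment with MXCSR.MM clear comes back as the #GP(0) status; #SS/#PF
       conditions are propagated through the strict status code the same way. */
    VBOXSTRICTRC rcStrict = iemMemFetchDataU128AlignedSse(pVCpu, &uSrc, X86_SREG_DS, GCPtrMem);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* ... operate on uSrc.au64[0] and uSrc.au64[1] ... */
    return VINF_SUCCESS;
}
#endif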
9448
9449
9450
9451/**
9452 * Fetches a descriptor register (lgdt, lidt).
9453 *
9454 * @returns Strict VBox status code.
9455 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9456 * @param pcbLimit Where to return the limit.
9457 * @param pGCPtrBase Where to return the base.
9458 * @param iSegReg The index of the segment register to use for
9459 * this access. The base and limits are checked.
9460 * @param GCPtrMem The address of the guest memory.
9461 * @param enmOpSize The effective operand size.
9462 */
9463IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9464 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9465{
9466 /*
9467 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9468 * little special:
9469 * - The two reads are done separately.
9470 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9471 * - We suspect the 386 to actually commit the limit before the base in
9472 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9473 * don't try to emulate this eccentric behavior, because it's not well
9474 * enough understood and rather hard to trigger.
9475 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9476 */
9477 VBOXSTRICTRC rcStrict;
9478 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9479 {
9480 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9481 if (rcStrict == VINF_SUCCESS)
9482 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9483 }
9484 else
9485 {
9486 uint32_t uTmp = 0; /* (Initialized to silence 'maybe used uninitialized' warnings from Visual C++.) */
9487 if (enmOpSize == IEMMODE_32BIT)
9488 {
9489 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9490 {
9491 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9492 if (rcStrict == VINF_SUCCESS)
9493 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9494 }
9495 else
9496 {
9497 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9498 if (rcStrict == VINF_SUCCESS)
9499 {
9500 *pcbLimit = (uint16_t)uTmp;
9501 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9502 }
9503 }
9504 if (rcStrict == VINF_SUCCESS)
9505 *pGCPtrBase = uTmp;
9506 }
9507 else
9508 {
9509 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9510 if (rcStrict == VINF_SUCCESS)
9511 {
9512 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9513 if (rcStrict == VINF_SUCCESS)
9514 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9515 }
9516 }
9517 }
9518 return rcStrict;
9519}
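/*
 * Usage sketch (illustrative only, not part of the build): how an LGDT-style
 * implementation might consume iemMemFetchDataXdtr.  The wrapper name, the
 * local variable names and the fixed 32-bit operand size are assumptions for
 * the example.
 */
#if 0
static VBOXSTRICTRC iemExampleLoadGdtrOperand(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEffSrc)
{
    uint16_t cbLimit;
    RTGCPTR  GCPtrBase;
    /* Two separate reads: the 16-bit limit at GCPtrEffSrc, the base at +2. */
    VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, IEMMODE_32BIT);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* ... load cbLimit/GCPtrBase into the descriptor table register here ... */
    return VINF_SUCCESS;
}
#endif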
9520
9521
9522
9523/**
9524 * Stores a data byte.
9525 *
9526 * @returns Strict VBox status code.
9527 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9528 * @param iSegReg The index of the segment register to use for
9529 * this access. The base and limits are checked.
9530 * @param GCPtrMem The address of the guest memory.
9531 * @param u8Value The value to store.
9532 */
9533IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9534{
9535 /* The lazy approach for now... */
9536 uint8_t *pu8Dst;
9537 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9538 if (rc == VINF_SUCCESS)
9539 {
9540 *pu8Dst = u8Value;
9541 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9542 }
9543 return rc;
9544}
9545
9546
9547#ifdef IEM_WITH_SETJMP
9548/**
9549 * Stores a data byte, longjmp on error.
9550 *
9551 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9552 * @param iSegReg The index of the segment register to use for
9553 * this access. The base and limits are checked.
9554 * @param GCPtrMem The address of the guest memory.
9555 * @param u8Value The value to store.
9556 */
9557IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9558{
9559 /* The lazy approach for now... */
9560 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9561 *pu8Dst = u8Value;
9562 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9563}
9564#endif
9565
9566
9567/**
9568 * Stores a data word.
9569 *
9570 * @returns Strict VBox status code.
9571 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9572 * @param iSegReg The index of the segment register to use for
9573 * this access. The base and limits are checked.
9574 * @param GCPtrMem The address of the guest memory.
9575 * @param u16Value The value to store.
9576 */
9577IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9578{
9579 /* The lazy approach for now... */
9580 uint16_t *pu16Dst;
9581 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9582 if (rc == VINF_SUCCESS)
9583 {
9584 *pu16Dst = u16Value;
9585 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9586 }
9587 return rc;
9588}
9589
9590
9591#ifdef IEM_WITH_SETJMP
9592/**
9593 * Stores a data word, longjmp on error.
9594 *
9595 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9596 * @param iSegReg The index of the segment register to use for
9597 * this access. The base and limits are checked.
9598 * @param GCPtrMem The address of the guest memory.
9599 * @param u16Value The value to store.
9600 */
9601IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9602{
9603 /* The lazy approach for now... */
9604 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9605 *pu16Dst = u16Value;
9606 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9607}
9608#endif
9609
9610
9611/**
9612 * Stores a data dword.
9613 *
9614 * @returns Strict VBox status code.
9615 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9616 * @param iSegReg The index of the segment register to use for
9617 * this access. The base and limits are checked.
9618 * @param GCPtrMem The address of the guest memory.
9619 * @param u32Value The value to store.
9620 */
9621IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9622{
9623 /* The lazy approach for now... */
9624 uint32_t *pu32Dst;
9625 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9626 if (rc == VINF_SUCCESS)
9627 {
9628 *pu32Dst = u32Value;
9629 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9630 }
9631 return rc;
9632}
9633
9634
9635#ifdef IEM_WITH_SETJMP
9636/**
9637 * Stores a data dword, longjmp on error.
9638 *
9640 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9641 * @param iSegReg The index of the segment register to use for
9642 * this access. The base and limits are checked.
9643 * @param GCPtrMem The address of the guest memory.
9644 * @param u32Value The value to store.
9645 */
9646IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9647{
9648 /* The lazy approach for now... */
9649 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9650 *pu32Dst = u32Value;
9651 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9652}
9653#endif
9654
9655
9656/**
9657 * Stores a data qword.
9658 *
9659 * @returns Strict VBox status code.
9660 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9661 * @param iSegReg The index of the segment register to use for
9662 * this access. The base and limits are checked.
9663 * @param GCPtrMem The address of the guest memory.
9664 * @param u64Value The value to store.
9665 */
9666IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9667{
9668 /* The lazy approach for now... */
9669 uint64_t *pu64Dst;
9670 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9671 if (rc == VINF_SUCCESS)
9672 {
9673 *pu64Dst = u64Value;
9674 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9675 }
9676 return rc;
9677}
9678
9679
9680#ifdef IEM_WITH_SETJMP
9681/**
9682 * Stores a data qword, longjmp on error.
9683 *
9684 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9685 * @param iSegReg The index of the segment register to use for
9686 * this access. The base and limits are checked.
9687 * @param GCPtrMem The address of the guest memory.
9688 * @param u64Value The value to store.
9689 */
9690IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9691{
9692 /* The lazy approach for now... */
9693 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9694 *pu64Dst = u64Value;
9695 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9696}
9697#endif
9698
9699
9700/**
9701 * Stores a data dqword.
9702 *
9703 * @returns Strict VBox status code.
9704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9705 * @param iSegReg The index of the segment register to use for
9706 * this access. The base and limits are checked.
9707 * @param GCPtrMem The address of the guest memory.
9708 * @param u128Value The value to store.
9709 */
9710IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9711{
9712 /* The lazy approach for now... */
9713 PRTUINT128U pu128Dst;
9714 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9715 if (rc == VINF_SUCCESS)
9716 {
9717 pu128Dst->au64[0] = u128Value.au64[0];
9718 pu128Dst->au64[1] = u128Value.au64[1];
9719 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9720 }
9721 return rc;
9722}
9723
9724
9725#ifdef IEM_WITH_SETJMP
9726/**
9727 * Stores a data dqword, longjmp on error.
9728 *
9729 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9730 * @param iSegReg The index of the segment register to use for
9731 * this access. The base and limits are checked.
9732 * @param GCPtrMem The address of the guest memory.
9733 * @param u128Value The value to store.
9734 */
9735IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9736{
9737 /* The lazy approach for now... */
9738 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9739 pu128Dst->au64[0] = u128Value.au64[0];
9740 pu128Dst->au64[1] = u128Value.au64[1];
9741 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9742}
9743#endif
9744
9745
9746/**
9747 * Stores a data dqword, SSE aligned.
9748 *
9749 * @returns Strict VBox status code.
9750 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9751 * @param iSegReg The index of the segment register to use for
9752 * this access. The base and limits are checked.
9753 * @param GCPtrMem The address of the guest memory.
9754 * @param u128Value The value to store.
9755 */
9756IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9757{
9758 /* The lazy approach for now... */
9759 if ( (GCPtrMem & 15)
9760 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9761 return iemRaiseGeneralProtectionFault0(pVCpu);
9762
9763 PRTUINT128U pu128Dst;
9764 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9765 if (rc == VINF_SUCCESS)
9766 {
9767 pu128Dst->au64[0] = u128Value.au64[0];
9768 pu128Dst->au64[1] = u128Value.au64[1];
9769 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9770 }
9771 return rc;
9772}
9773
9774
9775#ifdef IEM_WITH_SETJMP
9776/**
9777 * Stores a data dqword, SSE aligned, longjmp on error.
9778 *
9780 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9781 * @param iSegReg The index of the segment register to use for
9782 * this access. The base and limits are checked.
9783 * @param GCPtrMem The address of the guest memory.
9784 * @param u128Value The value to store.
9785 */
9786DECL_NO_INLINE(IEM_STATIC, void)
9787iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9788{
9789 /* The lazy approach for now... */
9790 if ( (GCPtrMem & 15) == 0
9791 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9792 {
9793 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9794 pu128Dst->au64[0] = u128Value.au64[0];
9795 pu128Dst->au64[1] = u128Value.au64[1];
9796 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9797 return;
9798 }
9799
9800 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9801 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9802}
9803#endif
9804
9805
9806/**
9807 * Stores a descriptor register (sgdt, sidt).
9808 *
9809 * @returns Strict VBox status code.
9810 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9811 * @param cbLimit The limit.
9812 * @param GCPtrBase The base address.
9813 * @param iSegReg The index of the segment register to use for
9814 * this access. The base and limits are checked.
9815 * @param GCPtrMem The address of the guest memory.
9816 */
9817IEM_STATIC VBOXSTRICTRC
9818iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
9819{
9820 VBOXSTRICTRC rcStrict;
9821 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_READS))
9822 {
9823 Log(("sidt/sgdt: Guest intercept -> #VMEXIT\n"));
9824 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
9825 }
9826
9827 /*
9828 * The SIDT and SGDT instructions actually stores the data using two
9829 * independent writes. The instructions does not respond to opsize prefixes.
9830 */
9831 rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
9832 if (rcStrict == VINF_SUCCESS)
9833 {
9834 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
9835 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
9836 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
9837 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
9838 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
9839 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
9840 else
9841 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
9842 }
9843 return rcStrict;
9844}
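/*
 * Resulting guest memory layout (a summary of the code above, for reference):
 *      GCPtrMem + 0:  the 16-bit limit, always written first as its own access.
 *      GCPtrMem + 2:  the base -- 32 bits in 16-bit and 32-bit mode (with the
 *                     top byte forced to 0xff for 286-level target CPUs in
 *                     16-bit mode), 64 bits in 64-bit mode.
 */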
9845
9846
9847/**
9848 * Pushes a word onto the stack.
9849 *
9850 * @returns Strict VBox status code.
9851 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9852 * @param u16Value The value to push.
9853 */
9854IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
9855{
9856 /* Decrement the stack pointer. */
9857 uint64_t uNewRsp;
9858 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9859 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
9860
9861 /* Write the word the lazy way. */
9862 uint16_t *pu16Dst;
9863 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9864 if (rc == VINF_SUCCESS)
9865 {
9866 *pu16Dst = u16Value;
9867 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9868 }
9869
9870 /* Commit the new RSP value unless an access handler made trouble. */
9871 if (rc == VINF_SUCCESS)
9872 pCtx->rsp = uNewRsp;
9873
9874 return rc;
9875}
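/*
 * Usage sketch (illustrative only, not part of the build): the push helpers
 * above map the new stack top, write the value and only then commit RSP, so a
 * failing access leaves RSP untouched.  The wrapper name is an assumption.
 */
#if 0
static VBOXSTRICTRC iemExamplePushTwoWords(PVMCPU pVCpu, uint16_t u16First, uint16_t u16Second)
{
    VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, u16First);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPushU16(pVCpu, u16Second); /* RSP already reflects the first push. */
    return rcStrict;
}
#endif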
9876
9877
9878/**
9879 * Pushes a dword onto the stack.
9880 *
9881 * @returns Strict VBox status code.
9882 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9883 * @param u32Value The value to push.
9884 */
9885IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
9886{
9887 /* Decrement the stack pointer. */
9888 uint64_t uNewRsp;
9889 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9890 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9891
9892 /* Write the dword the lazy way. */
9893 uint32_t *pu32Dst;
9894 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9895 if (rc == VINF_SUCCESS)
9896 {
9897 *pu32Dst = u32Value;
9898 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9899 }
9900
9901 /* Commit the new RSP value unless an access handler made trouble. */
9902 if (rc == VINF_SUCCESS)
9903 pCtx->rsp = uNewRsp;
9904
9905 return rc;
9906}
9907
9908
9909/**
9910 * Pushes a dword segment register value onto the stack.
9911 *
9912 * @returns Strict VBox status code.
9913 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9914 * @param u32Value The value to push.
9915 */
9916IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
9917{
9918 /* Decrement the stack pointer. */
9919 uint64_t uNewRsp;
9920 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9921 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9922
9923 VBOXSTRICTRC rc;
9924 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
9925 {
9926 /* The recompiler writes a full dword. */
9927 uint32_t *pu32Dst;
9928 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9929 if (rc == VINF_SUCCESS)
9930 {
9931 *pu32Dst = u32Value;
9932 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9933 }
9934 }
9935 else
9936 {
9937 /* The Intel docs talk about zero extending the selector register
9938 value. My actual Intel CPU here might be zero extending the value,
9939 but it still only writes the lower word... */
9940 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
9941 * happens when crossing a page boundary: is the high word checked
9942 * for write accessibility or not? Probably it is. What about segment limits?
9943 * It appears this behavior is also shared with trap error codes.
9944 *
9945 * Docs indicate the behavior may have changed with the Pentium or Pentium Pro.
9946 * Check on ancient hardware to see when it actually changed. */
9947 uint16_t *pu16Dst;
9948 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
9949 if (rc == VINF_SUCCESS)
9950 {
9951 *pu16Dst = (uint16_t)u32Value;
9952 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
9953 }
9954 }
9955
9956 /* Commit the new RSP value unless an access handler made trouble. */
9957 if (rc == VINF_SUCCESS)
9958 pCtx->rsp = uNewRsp;
9959
9960 return rc;
9961}
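/*
 * Concrete illustration of the partial write above (derived from the code, not
 * from additional testing): pushing a segment register in 32-bit code with
 * ESP=0x1000 moves ESP to 0x0FFC, but only the word at 0x0FFC..0x0FFD receives
 * the selector; bytes 0x0FFE..0x0FFF keep their previous contents, which is
 * presumably why the mapping is done IEM_ACCESS_STACK_RW rather than as a
 * plain write.
 */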
9962
9963
9964/**
9965 * Pushes a qword onto the stack.
9966 *
9967 * @returns Strict VBox status code.
9968 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9969 * @param u64Value The value to push.
9970 */
9971IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
9972{
9973 /* Decrement the stack pointer. */
9974 uint64_t uNewRsp;
9975 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9976 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
9977
9978 /* Write the qword the lazy way. */
9979 uint64_t *pu64Dst;
9980 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9981 if (rc == VINF_SUCCESS)
9982 {
9983 *pu64Dst = u64Value;
9984 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9985 }
9986
9987 /* Commit the new RSP value unless an access handler made trouble. */
9988 if (rc == VINF_SUCCESS)
9989 pCtx->rsp = uNewRsp;
9990
9991 return rc;
9992}
9993
9994
9995/**
9996 * Pops a word from the stack.
9997 *
9998 * @returns Strict VBox status code.
9999 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10000 * @param pu16Value Where to store the popped value.
10001 */
10002IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10003{
10004 /* Increment the stack pointer. */
10005 uint64_t uNewRsp;
10006 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10007 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
10008
10009 /* Read the word the lazy way. */
10010 uint16_t const *pu16Src;
10011 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10012 if (rc == VINF_SUCCESS)
10013 {
10014 *pu16Value = *pu16Src;
10015 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10016
10017 /* Commit the new RSP value. */
10018 if (rc == VINF_SUCCESS)
10019 pCtx->rsp = uNewRsp;
10020 }
10021
10022 return rc;
10023}
10024
10025
10026/**
10027 * Pops a dword from the stack.
10028 *
10029 * @returns Strict VBox status code.
10030 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10031 * @param pu32Value Where to store the popped value.
10032 */
10033IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10034{
10035 /* Increment the stack pointer. */
10036 uint64_t uNewRsp;
10037 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10038 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
10039
10040 /* Read the dword the lazy way. */
10041 uint32_t const *pu32Src;
10042 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10043 if (rc == VINF_SUCCESS)
10044 {
10045 *pu32Value = *pu32Src;
10046 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10047
10048 /* Commit the new RSP value. */
10049 if (rc == VINF_SUCCESS)
10050 pCtx->rsp = uNewRsp;
10051 }
10052
10053 return rc;
10054}
10055
10056
10057/**
10058 * Pops a qword from the stack.
10059 *
10060 * @returns Strict VBox status code.
10061 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10062 * @param pu64Value Where to store the popped value.
10063 */
10064IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10065{
10066 /* Increment the stack pointer. */
10067 uint64_t uNewRsp;
10068 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10069 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
10070
10071 /* Read the qword the lazy way. */
10072 uint64_t const *pu64Src;
10073 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10074 if (rc == VINF_SUCCESS)
10075 {
10076 *pu64Value = *pu64Src;
10077 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10078
10079 /* Commit the new RSP value. */
10080 if (rc == VINF_SUCCESS)
10081 pCtx->rsp = uNewRsp;
10082 }
10083
10084 return rc;
10085}
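/*
 * Usage sketch (illustrative only, not part of the build): the pop helpers
 * read the value and commit the incremented RSP in one go on success.  The
 * wrapper name is an assumption for this example.
 */
#if 0
static VBOXSTRICTRC iemExamplePopPair(PVMCPU pVCpu, uint64_t *puFirst, uint64_t *puSecond)
{
    VBOXSTRICTRC rcStrict = iemMemStackPopU64(pVCpu, puFirst);   /* RSP += 8 on success. */
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPopU64(pVCpu, puSecond);           /* RSP += 8 again. */
    return rcStrict;
}
#endif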
10086
10087
10088/**
10089 * Pushes a word onto the stack, using a temporary stack pointer.
10090 *
10091 * @returns Strict VBox status code.
10092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10093 * @param u16Value The value to push.
10094 * @param pTmpRsp Pointer to the temporary stack pointer.
10095 */
10096IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10097{
10098 /* Decrement the stack pointer. */
10099 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10100 RTUINT64U NewRsp = *pTmpRsp;
10101 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
10102
10103 /* Write the word the lazy way. */
10104 uint16_t *pu16Dst;
10105 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10106 if (rc == VINF_SUCCESS)
10107 {
10108 *pu16Dst = u16Value;
10109 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10110 }
10111
10112 /* Commit the new RSP value unless an access handler made trouble. */
10113 if (rc == VINF_SUCCESS)
10114 *pTmpRsp = NewRsp;
10115
10116 return rc;
10117}
10118
10119
10120/**
10121 * Pushes a dword onto the stack, using a temporary stack pointer.
10122 *
10123 * @returns Strict VBox status code.
10124 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10125 * @param u32Value The value to push.
10126 * @param pTmpRsp Pointer to the temporary stack pointer.
10127 */
10128IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10129{
10130 /* Decrement the stack pointer. */
10131 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10132 RTUINT64U NewRsp = *pTmpRsp;
10133 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
10134
10135 /* Write the dword the lazy way. */
10136 uint32_t *pu32Dst;
10137 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10138 if (rc == VINF_SUCCESS)
10139 {
10140 *pu32Dst = u32Value;
10141 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10142 }
10143
10144 /* Commit the new RSP value unless an access handler made trouble. */
10145 if (rc == VINF_SUCCESS)
10146 *pTmpRsp = NewRsp;
10147
10148 return rc;
10149}
10150
10151
10152/**
10153 * Pushes a qword onto the stack, using a temporary stack pointer.
10154 *
10155 * @returns Strict VBox status code.
10156 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10157 * @param u64Value The value to push.
10158 * @param pTmpRsp Pointer to the temporary stack pointer.
10159 */
10160IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10161{
10162 /* Decrement the stack pointer. */
10163 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10164 RTUINT64U NewRsp = *pTmpRsp;
10165 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
10166
10167 /* Write the qword the lazy way. */
10168 uint64_t *pu64Dst;
10169 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10170 if (rc == VINF_SUCCESS)
10171 {
10172 *pu64Dst = u64Value;
10173 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10174 }
10175
10176 /* Commit the new RSP value unless an access handler made trouble. */
10177 if (rc == VINF_SUCCESS)
10178 *pTmpRsp = NewRsp;
10179
10180 return rc;
10181}
10182
10183
10184/**
10185 * Pops a word from the stack, using a temporary stack pointer.
10186 *
10187 * @returns Strict VBox status code.
10188 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10189 * @param pu16Value Where to store the popped value.
10190 * @param pTmpRsp Pointer to the temporary stack pointer.
10191 */
10192IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10193{
10194 /* Increment the stack pointer. */
10195 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10196 RTUINT64U NewRsp = *pTmpRsp;
10197 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
10198
10199 /* Read the word the lazy way. */
10200 uint16_t const *pu16Src;
10201 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10202 if (rc == VINF_SUCCESS)
10203 {
10204 *pu16Value = *pu16Src;
10205 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10206
10207 /* Commit the new RSP value. */
10208 if (rc == VINF_SUCCESS)
10209 *pTmpRsp = NewRsp;
10210 }
10211
10212 return rc;
10213}
10214
10215
10216/**
10217 * Pops a dword from the stack, using a temporary stack pointer.
10218 *
10219 * @returns Strict VBox status code.
10220 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10221 * @param pu32Value Where to store the popped value.
10222 * @param pTmpRsp Pointer to the temporary stack pointer.
10223 */
10224IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10225{
10226 /* Increment the stack pointer. */
10227 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10228 RTUINT64U NewRsp = *pTmpRsp;
10229 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
10230
10231 /* Read the dword the lazy way. */
10232 uint32_t const *pu32Src;
10233 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10234 if (rc == VINF_SUCCESS)
10235 {
10236 *pu32Value = *pu32Src;
10237 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10238
10239 /* Commit the new RSP value. */
10240 if (rc == VINF_SUCCESS)
10241 *pTmpRsp = NewRsp;
10242 }
10243
10244 return rc;
10245}
10246
10247
10248/**
10249 * Pops a qword from the stack, using a temporary stack pointer.
10250 *
10251 * @returns Strict VBox status code.
10252 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10253 * @param pu64Value Where to store the popped value.
10254 * @param pTmpRsp Pointer to the temporary stack pointer.
10255 */
10256IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10257{
10258 /* Increment the stack pointer. */
10259 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10260 RTUINT64U NewRsp = *pTmpRsp;
10261 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10262
10263 /* Read the qword the lazy way. */
10264 uint64_t const *pu64Src;
10265 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10266 if (rcStrict == VINF_SUCCESS)
10267 {
10268 *pu64Value = *pu64Src;
10269 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10270
10271 /* Commit the new RSP value. */
10272 if (rcStrict == VINF_SUCCESS)
10273 *pTmpRsp = NewRsp;
10274 }
10275
10276 return rcStrict;
10277}
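/*
 * Usage sketch (illustrative only, not part of the build): the *Ex variants
 * work on a caller owned RTUINT64U copy of RSP, so a multi-part operation can
 * be backed out by simply not committing the copy.  The wrapper name is an
 * assumption for the example.
 */
#if 0
static VBOXSTRICTRC iemExamplePopTwoQwords(PVMCPU pVCpu, uint64_t *puFirst, uint64_t *puSecond)
{
    PCPUMCTX  pCtx = IEM_GET_CTX(pVCpu);
    RTUINT64U TmpRsp;
    TmpRsp.u = pCtx->rsp;                                   /* work on a copy of RSP */
    VBOXSTRICTRC rcStrict = iemMemStackPopU64Ex(pVCpu, puFirst, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPopU64Ex(pVCpu, puSecond, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        pCtx->rsp = TmpRsp.u;                               /* commit only if both pops worked */
    return rcStrict;
}
#endif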
10278
10279
10280/**
10281 * Begin a special stack push (used by interrupts, exceptions and such).
10282 *
10283 * This will raise \#SS or \#PF if appropriate.
10284 *
10285 * @returns Strict VBox status code.
10286 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10287 * @param cbMem The number of bytes to push onto the stack.
10288 * @param ppvMem Where to return the pointer to the stack memory.
10289 * As with the other memory functions this could be
10290 * direct access or bounce buffered access, so
10291 * don't commit the register state until the
10292 * commit call succeeds.
10293 * @param puNewRsp Where to return the new RSP value. This must be
10294 * passed unchanged to
10295 * iemMemStackPushCommitSpecial().
10296 */
10297IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10298{
10299 Assert(cbMem < UINT8_MAX);
10300 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10301 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10302 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10303}
10304
10305
10306/**
10307 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10308 *
10309 * This will update the rSP.
10310 *
10311 * @returns Strict VBox status code.
10312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10313 * @param pvMem The pointer returned by
10314 * iemMemStackPushBeginSpecial().
10315 * @param uNewRsp The new RSP value returned by
10316 * iemMemStackPushBeginSpecial().
10317 */
10318IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10319{
10320 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10321 if (rcStrict == VINF_SUCCESS)
10322 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10323 return rcStrict;
10324}
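/*
 * Usage sketch (illustrative only, not part of the build): the begin/commit
 * pair above is meant for exception/interrupt dispatch, where several values
 * go onto the stack as one mapped block and RSP is only committed at the end.
 * The three-word frame layout and the wrapper name are assumptions made up
 * for this example, not a real exception frame.
 */
#if 0
static VBOXSTRICTRC iemExamplePushThreeWords(PVMCPU pVCpu, uint16_t u16A, uint16_t u16B, uint16_t u16C)
{
    void        *pvFrame;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6 /*cbMem*/, &pvFrame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    uint16_t *pau16Frame = (uint16_t *)pvFrame;
    pau16Frame[0] = u16A;
    pau16Frame[1] = u16B;
    pau16Frame[2] = u16C;
    /* Only the commit updates RSP (and flushes any bounce buffering). */
    return iemMemStackPushCommitSpecial(pVCpu, pvFrame, uNewRsp);
}
#endif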
10325
10326
10327/**
10328 * Begin a special stack pop (used by iret, retf and such).
10329 *
10330 * This will raise \#SS or \#PF if appropriate.
10331 *
10332 * @returns Strict VBox status code.
10333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10334 * @param cbMem The number of bytes to pop from the stack.
10335 * @param ppvMem Where to return the pointer to the stack memory.
10336 * @param puNewRsp Where to return the new RSP value. This must be
10337 * assigned to CPUMCTX::rsp manually some time
10338 * after iemMemStackPopDoneSpecial() has been
10339 * called.
10340 */
10341IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10342{
10343 Assert(cbMem < UINT8_MAX);
10344 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10345 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10346 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10347}
10348
10349
10350/**
10351 * Continue a special stack pop (used by iret and retf).
10352 *
10353 * This will raise \#SS or \#PF if appropriate.
10354 *
10355 * @returns Strict VBox status code.
10356 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10357 * @param cbMem The number of bytes to pop from the stack.
10358 * @param ppvMem Where to return the pointer to the stack memory.
10359 * @param puNewRsp Where to return the new RSP value. This must be
10360 * assigned to CPUMCTX::rsp manually some time
10361 * after iemMemStackPopDoneSpecial() has been
10362 * called.
10363 */
10364IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10365{
10366 Assert(cbMem < UINT8_MAX);
10367 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10368 RTUINT64U NewRsp;
10369 NewRsp.u = *puNewRsp;
10370 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10371 *puNewRsp = NewRsp.u;
10372 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10373}
10374
10375
10376/**
10377 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10378 * iemMemStackPopContinueSpecial).
10379 *
10380 * The caller will manually commit the rSP.
10381 *
10382 * @returns Strict VBox status code.
10383 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10384 * @param pvMem The pointer returned by
10385 * iemMemStackPopBeginSpecial() or
10386 * iemMemStackPopContinueSpecial().
10387 */
10388IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10389{
10390 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10391}
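/*
 * Usage sketch (illustrative only, not part of the build): the
 * begin/continue/done trio above is how iret/retf style code walks a return
 * frame -- map, read, unmap, and only assign CPUMCTX::rsp once everything has
 * checked out.  The frame size, layout and wrapper name are assumptions for
 * the example.
 */
#if 0
static VBOXSTRICTRC iemExampleReadReturnFrame(PVMCPU pVCpu, uint64_t *puRetRip, uint64_t *puRetCs)
{
    void const  *pvFrame;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 16 /*cbMem*/, &pvFrame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *puRetRip = ((uint64_t const *)pvFrame)[0];
    *puRetCs  = ((uint64_t const *)pvFrame)[1];
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvFrame);
    if (rcStrict == VINF_SUCCESS)
        IEM_GET_CTX(pVCpu)->rsp = uNewRsp;  /* committed manually, as the doc comments above require */
    return rcStrict;
}
#endif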
10392
10393
10394/**
10395 * Fetches a system table byte.
10396 *
10397 * @returns Strict VBox status code.
10398 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10399 * @param pbDst Where to return the byte.
10400 * @param iSegReg The index of the segment register to use for
10401 * this access. The base and limits are checked.
10402 * @param GCPtrMem The address of the guest memory.
10403 */
10404IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10405{
10406 /* The lazy approach for now... */
10407 uint8_t const *pbSrc;
10408 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10409 if (rc == VINF_SUCCESS)
10410 {
10411 *pbDst = *pbSrc;
10412 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10413 }
10414 return rc;
10415}
10416
10417
10418/**
10419 * Fetches a system table word.
10420 *
10421 * @returns Strict VBox status code.
10422 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10423 * @param pu16Dst Where to return the word.
10424 * @param iSegReg The index of the segment register to use for
10425 * this access. The base and limits are checked.
10426 * @param GCPtrMem The address of the guest memory.
10427 */
10428IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10429{
10430 /* The lazy approach for now... */
10431 uint16_t const *pu16Src;
10432 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10433 if (rc == VINF_SUCCESS)
10434 {
10435 *pu16Dst = *pu16Src;
10436 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10437 }
10438 return rc;
10439}
10440
10441
10442/**
10443 * Fetches a system table dword.
10444 *
10445 * @returns Strict VBox status code.
10446 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10447 * @param pu32Dst Where to return the dword.
10448 * @param iSegReg The index of the segment register to use for
10449 * this access. The base and limits are checked.
10450 * @param GCPtrMem The address of the guest memory.
10451 */
10452IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10453{
10454 /* The lazy approach for now... */
10455 uint32_t const *pu32Src;
10456 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10457 if (rc == VINF_SUCCESS)
10458 {
10459 *pu32Dst = *pu32Src;
10460 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10461 }
10462 return rc;
10463}
10464
10465
10466/**
10467 * Fetches a system table qword.
10468 *
10469 * @returns Strict VBox status code.
10470 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10471 * @param pu64Dst Where to return the qword.
10472 * @param iSegReg The index of the segment register to use for
10473 * this access. The base and limits are checked.
10474 * @param GCPtrMem The address of the guest memory.
10475 */
10476IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10477{
10478 /* The lazy approach for now... */
10479 uint64_t const *pu64Src;
10480 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10481 if (rc == VINF_SUCCESS)
10482 {
10483 *pu64Dst = *pu64Src;
10484 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10485 }
10486 return rc;
10487}
10488
10489
10490/**
10491 * Fetches a descriptor table entry with caller specified error code.
10492 *
10493 * @returns Strict VBox status code.
10494 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10495 * @param pDesc Where to return the descriptor table entry.
10496 * @param uSel The selector which table entry to fetch.
10497 * @param uXcpt The exception to raise on table lookup error.
10498 * @param uErrorCode The error code associated with the exception.
10499 */
10500IEM_STATIC VBOXSTRICTRC
10501iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10502{
10503 AssertPtr(pDesc);
10504 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10505
10506 /** @todo did the 286 require all 8 bytes to be accessible? */
10507 /*
10508 * Get the selector table base and check bounds.
10509 */
10510 RTGCPTR GCPtrBase;
10511 if (uSel & X86_SEL_LDT)
10512 {
10513 if ( !pCtx->ldtr.Attr.n.u1Present
10514 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10515 {
10516 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10517 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10518 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10519 uErrorCode, 0);
10520 }
10521
10522 Assert(pCtx->ldtr.Attr.n.u1Present);
10523 GCPtrBase = pCtx->ldtr.u64Base;
10524 }
10525 else
10526 {
10527 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10528 {
10529 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
10530 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10531 uErrorCode, 0);
10532 }
10533 GCPtrBase = pCtx->gdtr.pGdt;
10534 }
10535
10536 /*
10537 * Read the legacy descriptor and maybe the long mode extensions if
10538 * required.
10539 */
10540 VBOXSTRICTRC rcStrict;
10541 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10542 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10543 else
10544 {
10545 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10546 if (rcStrict == VINF_SUCCESS)
10547 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10548 if (rcStrict == VINF_SUCCESS)
10549 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10550 if (rcStrict == VINF_SUCCESS)
10551 pDesc->Legacy.au16[3] = 0;
10552 else
10553 return rcStrict;
10554 }
10555
10556 if (rcStrict == VINF_SUCCESS)
10557 {
10558 if ( !IEM_IS_LONG_MODE(pVCpu)
10559 || pDesc->Legacy.Gen.u1DescType)
10560 pDesc->Long.au64[1] = 0;
10561 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
10562 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10563 else
10564 {
10565 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10566 /** @todo is this the right exception? */
10567 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10568 }
10569 }
10570 return rcStrict;
10571}
10572
10573
10574/**
10575 * Fetches a descriptor table entry.
10576 *
10577 * @returns Strict VBox status code.
10578 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10579 * @param pDesc Where to return the descriptor table entry.
10580 * @param uSel The selector which table entry to fetch.
10581 * @param uXcpt The exception to raise on table lookup error.
10582 */
10583IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10584{
10585 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10586}
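/*
 * Usage sketch (illustrative only, not part of the build): the typical pattern
 * for looking up a selector before loading it -- fetch the descriptor, check
 * it, then mark it accessed.  The checks shown are deliberately incomplete,
 * the #NP path is replaced by #GP(0) for brevity, and the wrapper name is an
 * assumption.
 */
#if 0
static VBOXSTRICTRC iemExampleLookUpDataSel(PVMCPU pVCpu, uint16_t uSel)
{
    IEMSELDESC Desc;
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;                            /* out-of-bounds selector -> #GP status */
    if (!Desc.Legacy.Gen.u1Present)
        return iemRaiseGeneralProtectionFault0(pVCpu); /* real code raises #NP here */
    /* ... many more type/DPL checks in real code ... */
    if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
        return iemMemMarkSelDescAccessed(pVCpu, uSel);
    return VINF_SUCCESS;
}
#endif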
10587
10588
10589/**
10590 * Fakes a long mode stack selector for SS = 0.
10591 *
10592 * @param pDescSs Where to return the fake stack descriptor.
10593 * @param uDpl The DPL we want.
10594 */
10595IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10596{
10597 pDescSs->Long.au64[0] = 0;
10598 pDescSs->Long.au64[1] = 0;
10599 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10600 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10601 pDescSs->Long.Gen.u2Dpl = uDpl;
10602 pDescSs->Long.Gen.u1Present = 1;
10603 pDescSs->Long.Gen.u1Long = 1;
10604}
10605
10606
10607/**
10608 * Marks the selector descriptor as accessed (only non-system descriptors).
10609 *
10610 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10611 * will therefore skip the limit checks.
10612 *
10613 * @returns Strict VBox status code.
10614 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10615 * @param uSel The selector.
10616 */
10617IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10618{
10619 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10620
10621 /*
10622 * Get the selector table base and calculate the entry address.
10623 */
10624 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10625 ? pCtx->ldtr.u64Base
10626 : pCtx->gdtr.pGdt;
10627 GCPtr += uSel & X86_SEL_MASK;
10628
10629 /*
10630 * ASMAtomicBitSet will assert if the address is misaligned, so do some
10631 * ugly stuff to avoid this. This will make sure the access is atomic
10632 * and, more or less, remove any question about 8-bit vs 32-bit accesses.
10633 */
10634 VBOXSTRICTRC rcStrict;
10635 uint32_t volatile *pu32;
10636 if ((GCPtr & 3) == 0)
10637 {
10638 /* The normal case: map the 32 bits containing the accessed bit (bit 40 of the descriptor). */
10639 GCPtr += 2 + 2;
10640 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10641 if (rcStrict != VINF_SUCCESS)
10642 return rcStrict;
10643 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
10644 }
10645 else
10646 {
10647 /* The misaligned GDT/LDT case, map the whole thing. */
10648 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10649 if (rcStrict != VINF_SUCCESS)
10650 return rcStrict;
10651 switch ((uintptr_t)pu32 & 3)
10652 {
10653 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
10654 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
10655 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
10656 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
10657 }
10658 }
10659
10660 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
10661}
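/*
 * Worked example of the bit arithmetic above (derived from the code): the
 * accessed flag is bit 40 of the 8-byte descriptor.  In the aligned case the
 * mapping starts at descriptor offset 4 (GCPtr += 2 + 2), so the flag is bit
 * 40 - 32 = 8 of that dword.  In the misaligned case the whole descriptor is
 * mapped and the byte base/bit index are adjusted in the switch so that the
 * same physical bit is set from a suitably aligned address.
 */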
10662
10663/** @} */
10664
10665
10666/*
10667 * Include the C/C++ implementation of instruction.
10668 */
10669#include "IEMAllCImpl.cpp.h"
10670
10671
10672
10673/** @name "Microcode" macros.
10674 *
10675 * The idea is that we should be able to use the same code to interpret
10676 * instructions as well as to drive a recompiler, hence this obfuscation.
10677 *
10678 * @{
10679 */
10680#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
10681#define IEM_MC_END() }
10682#define IEM_MC_PAUSE() do {} while (0)
10683#define IEM_MC_CONTINUE() do {} while (0)
10684
10685/** Internal macro. */
10686#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
10687 do \
10688 { \
10689 VBOXSTRICTRC rcStrict2 = a_Expr; \
10690 if (rcStrict2 != VINF_SUCCESS) \
10691 return rcStrict2; \
10692 } while (0)
10693
10694
10695#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
10696#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
10697#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
10698#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
10699#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
10700#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
10701#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
10702#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
10703#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
10704 do { \
10705 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
10706 return iemRaiseDeviceNotAvailable(pVCpu); \
10707 } while (0)
10708#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
10709 do { \
10710 if (((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
10711 return iemRaiseDeviceNotAvailable(pVCpu); \
10712 } while (0)
10713#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
10714 do { \
10715 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
10716 return iemRaiseMathFault(pVCpu); \
10717 } while (0)
10718#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
10719 do { \
10720 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10721 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10722 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
10723 return iemRaiseUndefinedOpcode(pVCpu); \
10724 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10725 return iemRaiseDeviceNotAvailable(pVCpu); \
10726 } while (0)
10727#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
10728 do { \
10729 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10730 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10731 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
10732 return iemRaiseUndefinedOpcode(pVCpu); \
10733 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10734 return iemRaiseDeviceNotAvailable(pVCpu); \
10735 } while (0)
10736#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
10737 do { \
10738 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10739 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10740 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
10741 return iemRaiseUndefinedOpcode(pVCpu); \
10742 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10743 return iemRaiseDeviceNotAvailable(pVCpu); \
10744 } while (0)
10745#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
10746 do { \
10747 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10748 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
10749 return iemRaiseUndefinedOpcode(pVCpu); \
10750 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10751 return iemRaiseDeviceNotAvailable(pVCpu); \
10752 } while (0)
10753#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
10754 do { \
10755 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10756 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
10757 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
10758 return iemRaiseUndefinedOpcode(pVCpu); \
10759 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10760 return iemRaiseDeviceNotAvailable(pVCpu); \
10761 } while (0)
10762#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
10763 do { \
10764 if (pVCpu->iem.s.uCpl != 0) \
10765 return iemRaiseGeneralProtectionFault0(pVCpu); \
10766 } while (0)
10767#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
10768 do { \
10769 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
10770 else return iemRaiseGeneralProtectionFault0(pVCpu); \
10771 } while (0)
10772
10773
10774#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
10775#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
10776#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
10777#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
10778#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
10779#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
10780#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
10781 uint32_t a_Name; \
10782 uint32_t *a_pName = &a_Name
10783#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
10784 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
10785
10786#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
10787#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
10788
10789#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10790#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10791#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10792#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10793#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10794#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10795#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10796#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10797#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10798#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10799#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10800#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10801#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10802#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10803#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
10804#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
10805#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
10806#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10807#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10808#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10809#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10810#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10811#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10812#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10813#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10814#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10815#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10816#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10817#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10818/** @note Not for IOPL or IF testing or modification. */
10819#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10820#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10821#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
10822#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
10823
10824#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
10825#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
10826#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
10827#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
10828#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
10829#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
10830#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
10831#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
10832#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
10833#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
10834#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
10835 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
10836
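/* Example (illustrative sketch only, not a real opcode handler from this file):
 * a register-to-register form typically fetches through one of the helpers
 * above and stores through one of the helpers below.  bRm comes from the
 * decoder; IEM_MC_BEGIN, IEM_MC_LOCAL, IEM_MC_ADVANCE_RIP and IEM_MC_END are
 * defined earlier in this file.
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
 *      IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */
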
10837#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
10838#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
10839/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
10840 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
10841#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
10842#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
10843/** @note Not for IOPL or IF testing or modification. */
10844#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10845
10846#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
10847#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
10848#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
10849 do { \
10850 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10851 *pu32Reg += (a_u32Value); \
10852 pu32Reg[1] = 0; /* implicitly clear the high half of the 64-bit register. */ \
10853 } while (0)
10854#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
10855
10856#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
10857#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
10858#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
10859 do { \
10860 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10861 *pu32Reg -= (a_u32Value); \
10862 pu32Reg[1] = 0; /* implicitly clear the high half of the 64-bit register. */ \
10863 } while (0)
10864#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
10865#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
10866
10867#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
10868#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
10869#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
10870#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
10871#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
10872#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
10873#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
10874
10875#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
10876#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
10877#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10878#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
10879
10880#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
10881#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
10882#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
10883
10884#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
10885#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
10886#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10887
10888#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
10889#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
10890#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
10891
10892#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
10893#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
10894#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
10895
10896#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10897
10898#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10899
10900#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
10901#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
10902#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
10903 do { \
10904 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10905 *pu32Reg &= (a_u32Value); \
10906 pu32Reg[1] = 0; /* implicitly clear the high half of the 64-bit register. */ \
10907 } while (0)
10908#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
10909
10910#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
10911#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
10912#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
10913 do { \
10914 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10915 *pu32Reg |= (a_u32Value); \
10916 pu32Reg[1] = 0; /* implicitly clear the high half of the 64-bit register. */ \
10917 } while (0)
10918#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
10919
10920
10921/** @note Not for IOPL or IF modification. */
10922#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
10923/** @note Not for IOPL or IF modification. */
10924#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
10925/** @note Not for IOPL or IF modification. */
10926#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
10927
10928#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
10929
10930
10931#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
10932 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
10933#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
10934 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
10935#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
10936 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
10937#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
10938 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
10939#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
10940 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10941#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
10942 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10943#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
10944 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10945
10946#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
10947 do { (a_u128Value).au64[0] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
10948 (a_u128Value).au64[1] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
10949 } while (0)
10950#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
10951 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
10952#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
10953 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
10954#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
10955 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
10956#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
10957 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
10958 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
10959 } while (0)
10960#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
10961 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
10962#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
10963 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
10964 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10965 } while (0)
10966#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
10967 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
10968#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
10969 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
10970 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10971 } while (0)
10972#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
10973 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
10974#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
10975 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
10976#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
10977 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
10978#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
10979 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
10980 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
10981 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
10982 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
10983 } while (0)
10984
10985#ifndef IEM_WITH_SETJMP
10986# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10987 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
10988# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10989 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
10990# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10991 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
10992#else
10993# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10994 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10995# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10996 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
10997# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10998 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
10999#endif
11000
11001#ifndef IEM_WITH_SETJMP
11002# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11003 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11004# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11005 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11006# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11007 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11008#else
11009# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11010 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11011# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11012 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11013# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11014 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11015#endif
11016
11017#ifndef IEM_WITH_SETJMP
11018# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11019 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11020# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11021 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11022# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11023 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11024#else
11025# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11026 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11027# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11028 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11029# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11030 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11031#endif
11032
11033#ifdef SOME_UNUSED_FUNCTION
11034# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11035 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11036#endif
11037
11038#ifndef IEM_WITH_SETJMP
11039# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11040 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11041# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11042 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11043# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11044 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11045# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11046 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11047#else
11048# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11049 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11050# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11051 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11052# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11053 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11054# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11055 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11056#endif
11057
11058#ifndef IEM_WITH_SETJMP
11059# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11060 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11061# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11062 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11063# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11064 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11065#else
11066# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11067 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11068# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11069 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11070# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11071 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11072#endif
11073
11074#ifndef IEM_WITH_SETJMP
11075# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11076 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11077# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11078 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11079#else
11080# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11081 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11082# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11083 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11084#endif
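/* Example (illustrative sketch only): the fetch macros are used with a local
 * destination and a previously calculated effective address; the call site is
 * written the same way for both the status-code and the setjmp flavour.  bRm
 * comes from the decoder and IEM_MC_CALC_RM_EFF_ADDR is defined further down
 * in this file.
 *
 *      IEM_MC_BEGIN(0, 2);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *      IEM_MC_FETCH_MEM_U16(u16Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *      IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */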
11085
11086
11087
11088#ifndef IEM_WITH_SETJMP
11089# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11090 do { \
11091 uint8_t u8Tmp; \
11092 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11093 (a_u16Dst) = u8Tmp; \
11094 } while (0)
11095# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11096 do { \
11097 uint8_t u8Tmp; \
11098 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11099 (a_u32Dst) = u8Tmp; \
11100 } while (0)
11101# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11102 do { \
11103 uint8_t u8Tmp; \
11104 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11105 (a_u64Dst) = u8Tmp; \
11106 } while (0)
11107# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11108 do { \
11109 uint16_t u16Tmp; \
11110 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11111 (a_u32Dst) = u16Tmp; \
11112 } while (0)
11113# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11114 do { \
11115 uint16_t u16Tmp; \
11116 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11117 (a_u64Dst) = u16Tmp; \
11118 } while (0)
11119# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11120 do { \
11121 uint32_t u32Tmp; \
11122 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11123 (a_u64Dst) = u32Tmp; \
11124 } while (0)
11125#else /* IEM_WITH_SETJMP */
11126# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11127 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11128# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11129 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11130# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11131 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11132# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11133 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11134# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11135 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11136# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11137 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11138#endif /* IEM_WITH_SETJMP */
11139
11140#ifndef IEM_WITH_SETJMP
11141# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11142 do { \
11143 uint8_t u8Tmp; \
11144 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11145 (a_u16Dst) = (int8_t)u8Tmp; \
11146 } while (0)
11147# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11148 do { \
11149 uint8_t u8Tmp; \
11150 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11151 (a_u32Dst) = (int8_t)u8Tmp; \
11152 } while (0)
11153# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11154 do { \
11155 uint8_t u8Tmp; \
11156 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11157 (a_u64Dst) = (int8_t)u8Tmp; \
11158 } while (0)
11159# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11160 do { \
11161 uint16_t u16Tmp; \
11162 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11163 (a_u32Dst) = (int16_t)u16Tmp; \
11164 } while (0)
11165# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11166 do { \
11167 uint16_t u16Tmp; \
11168 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11169 (a_u64Dst) = (int16_t)u16Tmp; \
11170 } while (0)
11171# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11172 do { \
11173 uint32_t u32Tmp; \
11174 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11175 (a_u64Dst) = (int32_t)u32Tmp; \
11176 } while (0)
11177#else /* IEM_WITH_SETJMP */
11178# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11179 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11180# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11181 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11182# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11183 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11184# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11185 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11186# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11187 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11188# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11189 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11190#endif /* IEM_WITH_SETJMP */
11191
11192#ifndef IEM_WITH_SETJMP
11193# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11194 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11195# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11196 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11197# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11198 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11199# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11200 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11201#else
11202# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11203 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11204# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11205 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11206# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11207 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11208# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11209 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11210#endif
11211
11212#ifndef IEM_WITH_SETJMP
11213# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11214 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11215# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11216 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11217# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11218 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11219# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11220 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11221#else
11222# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11223 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11224# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11225 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11226# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11227 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11228# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11229 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11230#endif
11231
11232#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11233#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11234#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11235#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11236#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11237#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11238#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11239 do { \
11240 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11241 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11242 } while (0)
11243
11244#ifndef IEM_WITH_SETJMP
11245# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11246 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11247# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11248 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11249#else
11250# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11251 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11252# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11253 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11254#endif
11255
11256
11257#define IEM_MC_PUSH_U16(a_u16Value) \
11258 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11259#define IEM_MC_PUSH_U32(a_u32Value) \
11260 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11261#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11262 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11263#define IEM_MC_PUSH_U64(a_u64Value) \
11264 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11265
11266#define IEM_MC_POP_U16(a_pu16Value) \
11267 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11268#define IEM_MC_POP_U32(a_pu32Value) \
11269 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11270#define IEM_MC_POP_U64(a_pu64Value) \
11271 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11272
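/* Example (illustrative sketch only): a push of a general register can be
 * expressed entirely with the helpers above; X86_GREG_xAX is used here just
 * as a concrete register index.
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *      IEM_MC_PUSH_U16(u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */
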
11273/** Maps guest memory for direct or bounce buffered access.
11274 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11275 * @remarks May return.
11276 */
11277#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11278 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11279
11280/** Maps guest memory for direct or bounce buffered access.
11281 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11282 * @remarks May return.
11283 */
11284#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11285 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11286
11287/** Commits the memory and unmaps the guest memory.
11288 * @remarks May return.
11289 */
11290#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11291 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11292
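/* Example (illustrative sketch only): a read-modify-write memory operand maps
 * the guest memory, hands the pointer to a worker and then commits the result.
 * iemAImpl_example_u16 is a hypothetical worker name; bRm comes from the
 * decoder and IEM_ACCESS_DATA_RW from IEMInternal.h.
 *
 *      IEM_MC_BEGIN(2, 1);
 *      IEM_MC_ARG(uint16_t *, pu16Dst, 0);
 *      IEM_MC_ARG(uint32_t *, pEFlags, 1);
 *      IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *      IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_REF_EFLAGS(pEFlags);
 *      IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_example_u16, pu16Dst, pEFlags);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */
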
11293/** Commits the memory and unmaps the guest memory, unless the FPU status word
11294 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
11295 * that would cause FLD not to store.
11296 *
11297 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11298 * store, while \#P will not.
11299 *
11300 * @remarks May in theory return - for now.
11301 */
11302#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11303 do { \
11304 if ( !(a_u16FSW & X86_FSW_ES) \
11305 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11306 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11307 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11308 } while (0)
11309
11310/** Calculate efficient address from R/M. */
11311#ifndef IEM_WITH_SETJMP
11312# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11313 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11314#else
11315# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11316 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11317#endif
11318
11319#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11320#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11321#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11322#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11323#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11324#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11325#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11326
11327/**
11328 * Defers the rest of the instruction emulation to a C implementation routine
11329 * and returns, only taking the standard parameters.
11330 *
11331 * @param a_pfnCImpl The pointer to the C routine.
11332 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11333 */
11334#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11335
11336/**
11337 * Defers the rest of the instruction emulation to a C implementation routine and
11338 * returns, taking one argument in addition to the standard ones.
11339 *
11340 * @param a_pfnCImpl The pointer to the C routine.
11341 * @param a0 The argument.
11342 */
11343#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11344
11345/**
11346 * Defers the rest of the instruction emulation to a C implementation routine
11347 * and returns, taking two arguments in addition to the standard ones.
11348 *
11349 * @param a_pfnCImpl The pointer to the C routine.
11350 * @param a0 The first extra argument.
11351 * @param a1 The second extra argument.
11352 */
11353#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11354
11355/**
11356 * Defers the rest of the instruction emulation to a C implementation routine
11357 * and returns, taking three arguments in addition to the standard ones.
11358 *
11359 * @param a_pfnCImpl The pointer to the C routine.
11360 * @param a0 The first extra argument.
11361 * @param a1 The second extra argument.
11362 * @param a2 The third extra argument.
11363 */
11364#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11365
11366/**
11367 * Defers the rest of the instruction emulation to a C implementation routine
11368 * and returns, taking four arguments in addition to the standard ones.
11369 *
11370 * @param a_pfnCImpl The pointer to the C routine.
11371 * @param a0 The first extra argument.
11372 * @param a1 The second extra argument.
11373 * @param a2 The third extra argument.
11374 * @param a3 The fourth extra argument.
11375 */
11376#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11377
11378/**
11379 * Defers the rest of the instruction emulation to a C implementation routine
11380 * and returns, taking five arguments in addition to the standard ones.
11381 *
11382 * @param a_pfnCImpl The pointer to the C routine.
11383 * @param a0 The first extra argument.
11384 * @param a1 The second extra argument.
11385 * @param a2 The third extra argument.
11386 * @param a3 The fourth extra argument.
11387 * @param a4 The fifth extra argument.
11388 */
11389#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
11390
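/* Example (illustrative sketch only): IEM_MC_CALL_CIMPL_* is used inside an
 * IEM_MC_BEGIN/IEM_MC_END block once the operands have been decoded, leaving
 * the actual work to a C worker.  iemCImpl_example is a hypothetical worker
 * taking one extra argument; X86_GREG_xAX is just a concrete register index.
 *
 *      IEM_MC_BEGIN(1, 0);
 *      IEM_MC_ARG(uint16_t, u16Value, 0);
 *      IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *      IEM_MC_CALL_CIMPL_1(iemCImpl_example, u16Value);
 *      IEM_MC_END();
 */
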
11391/**
11392 * Defers the entire instruction emulation to a C implementation routine and
11393 * returns, only taking the standard parameters.
11394 *
11395 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11396 *
11397 * @param a_pfnCImpl The pointer to the C routine.
11398 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11399 */
11400#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11401
11402/**
11403 * Defers the entire instruction emulation to a C implementation routine and
11404 * returns, taking one argument in addition to the standard ones.
11405 *
11406 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11407 *
11408 * @param a_pfnCImpl The pointer to the C routine.
11409 * @param a0 The argument.
11410 */
11411#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11412
11413/**
11414 * Defers the entire instruction emulation to a C implementation routine and
11415 * returns, taking two arguments in addition to the standard ones.
11416 *
11417 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11418 *
11419 * @param a_pfnCImpl The pointer to the C routine.
11420 * @param a0 The first extra argument.
11421 * @param a1 The second extra argument.
11422 */
11423#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11424
11425/**
11426 * Defers the entire instruction emulation to a C implementation routine and
11427 * returns, taking three arguments in addition to the standard ones.
11428 *
11429 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11430 *
11431 * @param a_pfnCImpl The pointer to the C routine.
11432 * @param a0 The first extra argument.
11433 * @param a1 The second extra argument.
11434 * @param a2 The third extra argument.
11435 */
11436#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11437
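/* Example (illustrative sketch only): the IEM_MC_DEFER_TO_CIMPL_* variants make
 * up the entire decoder body, with no IEM_MC_BEGIN/IEM_MC_END around them.
 * iemOp_example and iemCImpl_example are hypothetical names.
 *
 *      FNIEMOP_DEF(iemOp_example)
 *      {
 *          IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *          return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_example);
 *      }
 */
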
11438/**
11439 * Calls a FPU assembly implementation taking one visible argument.
11440 *
11441 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11442 * @param a0 The first extra argument.
11443 */
11444#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
11445 do { \
11446 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
11447 } while (0)
11448
11449/**
11450 * Calls a FPU assembly implementation taking two visible arguments.
11451 *
11452 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11453 * @param a0 The first extra argument.
11454 * @param a1 The second extra argument.
11455 */
11456#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
11457 do { \
11458 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11459 } while (0)
11460
11461/**
11462 * Calls a FPU assembly implementation taking three visible arguments.
11463 *
11464 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11465 * @param a0 The first extra argument.
11466 * @param a1 The second extra argument.
11467 * @param a2 The third extra argument.
11468 */
11469#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11470 do { \
11471 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11472 } while (0)
11473
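/* Example (illustrative sketch only): a typical ST0,STi arithmetic instruction
 * combines the FPU call macro above with the result/underflow helpers defined
 * below.  pfnAImpl and iStReg are assumed to be supplied by the caller; the
 * exception-check macros are defined elsewhere in this file.
 *
 *      IEM_MC_BEGIN(3, 1);
 *      IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
 *      IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
 *      IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
 *      IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);
 *      IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
 *      IEM_MC_MAYBE_RAISE_FPU_XCPT();
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, iStReg)
 *          IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0);
 *      IEM_MC_ENDIF();
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */
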
11474#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
11475 do { \
11476 (a_FpuData).FSW = (a_FSW); \
11477 (a_FpuData).r80Result = *(a_pr80Value); \
11478 } while (0)
11479
11480/** Pushes FPU result onto the stack. */
11481#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
11482 iemFpuPushResult(pVCpu, &a_FpuData)
11483/** Pushes FPU result onto the stack and sets the FPUDP. */
11484#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
11485 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
11486
11487/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
11488#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
11489 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
11490
11491/** Stores FPU result in a stack register. */
11492#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
11493 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
11494/** Stores FPU result in a stack register and pops the stack. */
11495#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
11496 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
11497/** Stores FPU result in a stack register and sets the FPUDP. */
11498#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11499 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11500/** Stores FPU result in a stack register, sets the FPUDP, and pops the
11501 * stack. */
11502#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11503 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11504
11505/** Only updates the FOP, FPUIP, and FPUCS. (For FNOP.) */
11506#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
11507 iemFpuUpdateOpcodeAndIp(pVCpu)
11508/** Free a stack register (for FFREE and FFREEP). */
11509#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
11510 iemFpuStackFree(pVCpu, a_iStReg)
11511/** Increment the FPU stack pointer. */
11512#define IEM_MC_FPU_STACK_INC_TOP() \
11513 iemFpuStackIncTop(pVCpu)
11514/** Decrement the FPU stack pointer. */
11515#define IEM_MC_FPU_STACK_DEC_TOP() \
11516 iemFpuStackDecTop(pVCpu)
11517
11518/** Updates the FSW, FOP, FPUIP, and FPUCS. */
11519#define IEM_MC_UPDATE_FSW(a_u16FSW) \
11520 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11521/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
11522#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
11523 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11524/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
11525#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11526 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11527/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
11528#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
11529 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
11530/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
11531 * stack. */
11532#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11533 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11534/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
11535#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
11536 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
11537
11538/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
11539#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
11540 iemFpuStackUnderflow(pVCpu, a_iStDst)
11541/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11542 * stack. */
11543#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
11544 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
11545/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11546 * FPUDS. */
11547#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11548 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11549/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11550 * FPUDS. Pops stack. */
11551#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11552 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11553/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11554 * stack twice. */
11555#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
11556 iemFpuStackUnderflowThenPopPop(pVCpu)
11557/** Raises a FPU stack underflow exception for an instruction pushing a result
11558 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
11559#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
11560 iemFpuStackPushUnderflow(pVCpu)
11561/** Raises a FPU stack underflow exception for an instruction pushing a result
11562 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
11563#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
11564 iemFpuStackPushUnderflowTwo(pVCpu)
11565
11566/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11567 * FPUIP, FPUCS and FOP. */
11568#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
11569 iemFpuStackPushOverflow(pVCpu)
11570/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11571 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
11572#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
11573 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
11574/** Prepares for using the FPU state.
11575 * Ensures that we can use the host FPU in the current context (RC+R0).
11576 * Ensures the guest FPU state in the CPUMCTX is up to date. */
11577#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
11578/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
11579#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
11580/** Actualizes the guest FPU state so it can be accessed and modified. */
11581#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
11582
11583/** Prepares for using the SSE state.
11584 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
11585 * Ensures the guest SSE state in the CPUMCTX is up to date. */
11586#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
11587/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
11588#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
11589/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
11590#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
11591
11592/**
11593 * Calls a MMX assembly implementation taking two visible arguments.
11594 *
11595 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11596 * @param a0 The first extra argument.
11597 * @param a1 The second extra argument.
11598 */
11599#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
11600 do { \
11601 IEM_MC_PREPARE_FPU_USAGE(); \
11602 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11603 } while (0)
11604
11605/**
11606 * Calls a MMX assembly implementation taking three visible arguments.
11607 *
11608 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11609 * @param a0 The first extra argument.
11610 * @param a1 The second extra argument.
11611 * @param a2 The third extra argument.
11612 */
11613#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11614 do { \
11615 IEM_MC_PREPARE_FPU_USAGE(); \
11616 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11617 } while (0)
11618
11619
11620/**
11621 * Calls a SSE assembly implementation taking two visible arguments.
11622 *
11623 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11624 * @param a0 The first extra argument.
11625 * @param a1 The second extra argument.
11626 */
11627#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
11628 do { \
11629 IEM_MC_PREPARE_SSE_USAGE(); \
11630 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11631 } while (0)
11632
11633/**
11634 * Calls a SSE assembly implementation taking three visible arguments.
11635 *
11636 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11637 * @param a0 The first extra argument.
11638 * @param a1 The second extra argument.
11639 * @param a2 The third extra argument.
11640 */
11641#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11642 do { \
11643 IEM_MC_PREPARE_SSE_USAGE(); \
11644 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11645 } while (0)
11646
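/* Example (illustrative sketch only): a full 128-bit register-form SSE binary
 * operation references both XMM registers and calls the assembly worker via
 * the macro above.  pfnU128 is a hypothetical worker pointer, bRm comes from
 * the decoder, and the usual exception checks are omitted for brevity.
 *
 *      IEM_MC_BEGIN(2, 0);
 *      IEM_MC_ARG(PRTUINT128U,  pDst, 0);
 *      IEM_MC_ARG(PCRTUINT128U, pSrc, 1);
 *      IEM_MC_PREPARE_SSE_USAGE();
 *      IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *      IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
 *      IEM_MC_CALL_SSE_AIMPL_2(pfnU128, pDst, pSrc);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */
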
11647/** @note Not for IOPL or IF testing. */
11648#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
11649/** @note Not for IOPL or IF testing. */
11650#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
11651/** @note Not for IOPL or IF testing. */
11652#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
11653/** @note Not for IOPL or IF testing. */
11654#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
11655/** @note Not for IOPL or IF testing. */
11656#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
11657 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11658 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11659/** @note Not for IOPL or IF testing. */
11660#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
11661 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11662 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11663/** @note Not for IOPL or IF testing. */
11664#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
11665 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11666 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11667 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11668/** @note Not for IOPL or IF testing. */
11669#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
11670 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11671 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11672 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11673#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
11674#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
11675#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
11676/** @note Not for IOPL or IF testing. */
11677#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11678 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11679 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11680/** @note Not for IOPL or IF testing. */
11681#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11682 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11683 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11684/** @note Not for IOPL or IF testing. */
11685#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11686 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11687 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11688/** @note Not for IOPL or IF testing. */
11689#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11690 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11691 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11692/** @note Not for IOPL or IF testing. */
11693#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11694 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11695 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11696/** @note Not for IOPL or IF testing. */
11697#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11698 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11699 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11700#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
11701#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
11702
11703#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
11704 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
11705#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
11706 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
11707#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
11708 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
11709#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
11710 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
11711#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
11712 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
11713#define IEM_MC_IF_FCW_IM() \
11714 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
11715
11716#define IEM_MC_ELSE() } else {
11717#define IEM_MC_ENDIF() } do {} while (0)
11718
11719/** @} */
11720
11721
11722/** @name Opcode Debug Helpers.
11723 * @{
11724 */
11725#ifdef VBOX_WITH_STATISTICS
11726# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
11727#else
11728# define IEMOP_INC_STATS(a_Stats) do { } while (0)
11729#endif
11730
11731#ifdef DEBUG
11732# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
11733 do { \
11734 IEMOP_INC_STATS(a_Stats); \
11735 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
11736 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
11737 } while (0)
11738
11739# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
11740 do { \
11741 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11742 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11743 (void)RT_CONCAT(OP_,a_Upper); \
11744 (void)(a_fDisHints); \
11745 (void)(a_fIemHints); \
11746 } while (0)
11747
11748# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
11749 do { \
11750 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11751 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11752 (void)RT_CONCAT(OP_,a_Upper); \
11753 (void)RT_CONCAT(OP_PARM_,a_Op1); \
11754 (void)(a_fDisHints); \
11755 (void)(a_fIemHints); \
11756 } while (0)
11757
11758# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
11759 do { \
11760 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11761 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11762 (void)RT_CONCAT(OP_,a_Upper); \
11763 (void)RT_CONCAT(OP_PARM_,a_Op1); \
11764 (void)RT_CONCAT(OP_PARM_,a_Op2); \
11765 (void)(a_fDisHints); \
11766 (void)(a_fIemHints); \
11767 } while (0)
11768
11769# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
11770 do { \
11771 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11772 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11773 (void)RT_CONCAT(OP_,a_Upper); \
11774 (void)RT_CONCAT(OP_PARM_,a_Op1); \
11775 (void)RT_CONCAT(OP_PARM_,a_Op2); \
11776 (void)RT_CONCAT(OP_PARM_,a_Op3); \
11777 (void)(a_fDisHints); \
11778 (void)(a_fIemHints); \
11779 } while (0)
11780
11781# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
11782 do { \
11783 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11784 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11785 (void)RT_CONCAT(OP_,a_Upper); \
11786 (void)RT_CONCAT(OP_PARM_,a_Op1); \
11787 (void)RT_CONCAT(OP_PARM_,a_Op2); \
11788 (void)RT_CONCAT(OP_PARM_,a_Op3); \
11789 (void)RT_CONCAT(OP_PARM_,a_Op4); \
11790 (void)(a_fDisHints); \
11791 (void)(a_fIemHints); \
11792 } while (0)
11793
11794#else
11795# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
11796
11797# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
11798 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
11799# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
11800 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
11801# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
11802 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
11803# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
11804 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
11805# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
11806 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
11807
11808#endif
11809
11810#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
11811 IEMOP_MNEMONIC0EX(a_Lower, \
11812 #a_Lower, \
11813 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
11814#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
11815 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
11816 #a_Lower " " #a_Op1, \
11817 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
11818#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
11819 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
11820 #a_Lower " " #a_Op1 "," #a_Op2, \
11821 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
11822#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
11823 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
11824 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
11825 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
11826#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
11827 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
11828 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
11829 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
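
/* Example (illustrative sketch only): a decoder announces itself with one of
 * these right after reading the opcode byte.  The operand forms and hint flags
 * shown here (MR, Eb, Gb, DISOPTYPE_HARMLESS) follow the disassembler naming
 * and are only meant as plausible values.
 *
 *      IEMOP_MNEMONIC2(MR, ADD, add, Eb, Gb, DISOPTYPE_HARMLESS, 0);
 */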
11830
11831/** @} */
11832
11833
11834/** @name Opcode Helpers.
11835 * @{
11836 */
11837
11838#ifdef IN_RING3
11839# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11840 do { \
11841 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11842 else \
11843 { \
11844 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
11845 return IEMOP_RAISE_INVALID_OPCODE(); \
11846 } \
11847 } while (0)
11848#else
11849# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11850 do { \
11851 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11852 else return IEMOP_RAISE_INVALID_OPCODE(); \
11853 } while (0)
11854#endif
11855
11856/** The instruction requires a 186 or later. */
11857#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
11858# define IEMOP_HLP_MIN_186() do { } while (0)
11859#else
11860# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
11861#endif
11862
11863/** The instruction requires a 286 or later. */
11864#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
11865# define IEMOP_HLP_MIN_286() do { } while (0)
11866#else
11867# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
11868#endif
11869
11870/** The instruction requires a 386 or later. */
11871#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11872# define IEMOP_HLP_MIN_386() do { } while (0)
11873#else
11874# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
11875#endif
11876
11877/** The instruction requires a 386 or later if the given expression is true. */
11878#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11879# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
11880#else
11881# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
11882#endif
11883
11884/** The instruction requires a 486 or later. */
11885#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
11886# define IEMOP_HLP_MIN_486() do { } while (0)
11887#else
11888# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
11889#endif
11890
11891/** The instruction requires a Pentium (586) or later. */
11892#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
11893# define IEMOP_HLP_MIN_586() do { } while (0)
11894#else
11895# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
11896#endif
11897
11898/** The instruction requires a PentiumPro (686) or later. */
11899#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
11900# define IEMOP_HLP_MIN_686() do { } while (0)
11901#else
11902# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
11903#endif
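/*
 * Usage sketch (illustrative only, the handler name is hypothetical): an
 * opcode decoder for an instruction introduced with the 386 would typically
 * invoke the matching helper at the top of its body:
 *
 *     FNIEMOP_DEF(iemOp_some_386_instruction)
 *     {
 *         IEMOP_HLP_MIN_386();    // raises #UD on older target CPUs
 *         ...
 *     }
 *
 * When IEM_CFG_TARGET_CPU is at least the required level the helper expands
 * to an empty statement, so the check is free in the common configuration.
 */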
11904
11905
11906/** The instruction raises an \#UD in real and V8086 mode. */
11907#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
11908 do \
11909 { \
11910 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
11911 else return IEMOP_RAISE_INVALID_OPCODE(); \
11912 } while (0)
11913
11914/** The instruction is not available in 64-bit mode, throw \#UD if we're in
11915 * 64-bit mode. */
11916#define IEMOP_HLP_NO_64BIT() \
11917 do \
11918 { \
11919 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11920 return IEMOP_RAISE_INVALID_OPCODE(); \
11921 } while (0)
11922
11923/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
11924 * 64-bit mode. */
11925#define IEMOP_HLP_ONLY_64BIT() \
11926 do \
11927 { \
11928 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
11929 return IEMOP_RAISE_INVALID_OPCODE(); \
11930 } while (0)
11931
11932/** The instruction defaults to 64-bit operand size if 64-bit mode. */
11933#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
11934 do \
11935 { \
11936 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11937 iemRecalEffOpSize64Default(pVCpu); \
11938 } while (0)
11939
11940/** The instruction has 64-bit operand size if 64-bit mode. */
11941#define IEMOP_HLP_64BIT_OP_SIZE() \
11942 do \
11943 { \
11944 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11945 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
11946 } while (0)
11947
11948/** Only a REX prefix immediately preceding the first opcode byte takes
11949 * effect. This macro helps ensure this and logs offending guest code. */
11950#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
11951 do \
11952 { \
11953 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
11954 { \
11955 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
11956 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
11957 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
11958 pVCpu->iem.s.uRexB = 0; \
11959 pVCpu->iem.s.uRexIndex = 0; \
11960 pVCpu->iem.s.uRexReg = 0; \
11961 iemRecalEffOpSize(pVCpu); \
11962 } \
11963 } while (0)
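/*
 * Illustrative note: the architecture only honours a REX prefix when it is
 * the byte immediately before the opcode.  In the byte sequence 48 26 90,
 * for instance, the REX.W (48) is followed by a segment prefix (26), so it
 * must not affect the NOP (90); the macro above clears the recorded REX
 * state in exactly that situation and logs the offending guest code.
 */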
11964
11965/**
11966 * Done decoding.
11967 */
11968#define IEMOP_HLP_DONE_DECODING() \
11969 do \
11970 { \
11971 /*nothing for now, maybe later... */ \
11972 } while (0)
11973
11974/**
11975 * Done decoding, raise \#UD exception if lock prefix present.
11976 */
11977#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
11978 do \
11979 { \
11980 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11981 { /* likely */ } \
11982 else \
11983 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11984 } while (0)
11985
11986#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
11987 do \
11988 { \
11989 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11990 { /* likely */ } \
11991 else \
11992 { \
11993 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
11994 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11995 } \
11996 } while (0)
11997#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
11998 do \
11999 { \
12000 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12001 { /* likely */ } \
12002 else \
12003 { \
12004 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12005 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12006 } \
12007 } while (0)
12008
12009/**
12010 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12011 * are present.
12012 */
12013#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12014 do \
12015 { \
12016 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12017 { /* likely */ } \
12018 else \
12019 return IEMOP_RAISE_INVALID_OPCODE(); \
12020 } while (0)
12021
12022
12023/**
12024 * Done decoding VEX.
12025 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, or if
12026 * we're in real or v8086 mode.
12027 */
12028#define IEMOP_HLP_DONE_VEX_DECODING() \
12029 do \
12030 { \
12031 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12032 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12033 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12034 { /* likely */ } \
12035 else \
12036 return IEMOP_RAISE_INVALID_OPCODE(); \
12037 } while (0)
12038
12039/**
12040 * Done decoding VEX, no V, no L.
12041 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12042 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12043 */
12044#define IEMOP_HLP_DONE_VEX_DECODING_L_ZERO_NO_VVV() \
12045 do \
12046 { \
12047 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12048 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12049 && pVCpu->iem.s.uVexLength == 0 \
12050 && pVCpu->iem.s.uVex3rdReg == 0 \
12051 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12052 { /* likely */ } \
12053 else \
12054 return IEMOP_RAISE_INVALID_OPCODE(); \
12055 } while (0)
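/*
 * Note: VEX.vvvv is stored inverted in the instruction encoding; the check
 * against zero above suggests uVex3rdReg holds the decoded register number,
 * so only an encoded vvvv of 1111b (which decodes to 0 and means no register
 * operand) passes IEMOP_HLP_DONE_VEX_DECODING_L_ZERO_NO_VVV.
 */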
12056
12057#ifdef VBOX_WITH_NESTED_HWVIRT
12058/** Check and handle SVM nested-guest control & instruction intercepts. */
12059# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
12060 do \
12061 { \
12062 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
12063 IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
12064 } while (0)
12065
12066/** Check and handle SVM nested-guest CR read intercept (CR number given by a_uCr). */
12067# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \
12068 do \
12069 { \
12070 if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)) \
12071 IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, SVM_EXIT_READ_CR0 + (a_uCr), a_uExitInfo1, a_uExitInfo2); \
12072 } while (0)
12073
12074#else
12075# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12076# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12077
12078#endif /* VBOX_WITH_NESTED_HWVIRT */
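/*
 * Usage sketch (illustrative; the exit-info values are placeholders): an
 * instruction emulation that must honour the nested guest's control-register
 * read intercepts could do
 *
 *     IEMOP_HLP_SVM_READ_CR_INTERCEPT(pVCpu, 4, 0, 0);   (a_uCr = 4, i.e. CR4)
 *
 * which, when VBOX_WITH_NESTED_HWVIRT is defined and the intercept bit is
 * set, triggers a nested-guest #VMEXIT with exit code SVM_EXIT_READ_CR0 + 4.
 * IEMOP_HLP_SVM_CTRL_INTERCEPT works the same way for the general control
 * and instruction intercepts, with the intercept bit and exit code supplied
 * by the caller.  Both macros are no-ops in non-nested builds.
 */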
12079
12080
12081/**
12082 * Calculates the effective address of a ModR/M memory operand.
12083 *
12084 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12085 *
12086 * @return Strict VBox status code.
12087 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12088 * @param bRm The ModRM byte.
12089 * @param cbImm The size of any immediate following the
12090 * effective address opcode bytes. Important for
12091 * RIP relative addressing.
12092 * @param pGCPtrEff Where to return the effective address.
12093 */
12094IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12095{
12096 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12097 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12098# define SET_SS_DEF() \
12099 do \
12100 { \
12101 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12102 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12103 } while (0)
12104
12105 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12106 {
12107/** @todo Check the effective address size crap! */
12108 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12109 {
12110 uint16_t u16EffAddr;
12111
12112 /* Handle the disp16 form with no registers first. */
12113 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12114 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12115 else
12116 {
12117 /* Get the displacement. */
12118 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12119 {
12120 case 0: u16EffAddr = 0; break;
12121 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12122 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12123 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12124 }
12125
12126 /* Add the base and index registers to the disp. */
12127 switch (bRm & X86_MODRM_RM_MASK)
12128 {
12129 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12130 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12131 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12132 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12133 case 4: u16EffAddr += pCtx->si; break;
12134 case 5: u16EffAddr += pCtx->di; break;
12135 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12136 case 7: u16EffAddr += pCtx->bx; break;
12137 }
12138 }
12139
12140 *pGCPtrEff = u16EffAddr;
12141 }
12142 else
12143 {
12144 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12145 uint32_t u32EffAddr;
12146
12147 /* Handle the disp32 form with no registers first. */
12148 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12149 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12150 else
12151 {
12152 /* Get the register (or SIB) value. */
12153 switch ((bRm & X86_MODRM_RM_MASK))
12154 {
12155 case 0: u32EffAddr = pCtx->eax; break;
12156 case 1: u32EffAddr = pCtx->ecx; break;
12157 case 2: u32EffAddr = pCtx->edx; break;
12158 case 3: u32EffAddr = pCtx->ebx; break;
12159 case 4: /* SIB */
12160 {
12161 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12162
12163 /* Get the index and scale it. */
12164 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12165 {
12166 case 0: u32EffAddr = pCtx->eax; break;
12167 case 1: u32EffAddr = pCtx->ecx; break;
12168 case 2: u32EffAddr = pCtx->edx; break;
12169 case 3: u32EffAddr = pCtx->ebx; break;
12170 case 4: u32EffAddr = 0; /*none */ break;
12171 case 5: u32EffAddr = pCtx->ebp; break;
12172 case 6: u32EffAddr = pCtx->esi; break;
12173 case 7: u32EffAddr = pCtx->edi; break;
12174 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12175 }
12176 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12177
12178 /* add base */
12179 switch (bSib & X86_SIB_BASE_MASK)
12180 {
12181 case 0: u32EffAddr += pCtx->eax; break;
12182 case 1: u32EffAddr += pCtx->ecx; break;
12183 case 2: u32EffAddr += pCtx->edx; break;
12184 case 3: u32EffAddr += pCtx->ebx; break;
12185 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12186 case 5:
12187 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12188 {
12189 u32EffAddr += pCtx->ebp;
12190 SET_SS_DEF();
12191 }
12192 else
12193 {
12194 uint32_t u32Disp;
12195 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12196 u32EffAddr += u32Disp;
12197 }
12198 break;
12199 case 6: u32EffAddr += pCtx->esi; break;
12200 case 7: u32EffAddr += pCtx->edi; break;
12201 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12202 }
12203 break;
12204 }
12205 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12206 case 6: u32EffAddr = pCtx->esi; break;
12207 case 7: u32EffAddr = pCtx->edi; break;
12208 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12209 }
12210
12211 /* Get and add the displacement. */
12212 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12213 {
12214 case 0:
12215 break;
12216 case 1:
12217 {
12218 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12219 u32EffAddr += i8Disp;
12220 break;
12221 }
12222 case 2:
12223 {
12224 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12225 u32EffAddr += u32Disp;
12226 break;
12227 }
12228 default:
12229 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12230 }
12231
12232 }
12233 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12234 *pGCPtrEff = u32EffAddr;
12235 else
12236 {
12237 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12238 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12239 }
12240 }
12241 }
12242 else
12243 {
12244 uint64_t u64EffAddr;
12245
12246 /* Handle the rip+disp32 form with no registers first. */
12247 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12248 {
12249 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12250 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12251 }
12252 else
12253 {
12254 /* Get the register (or SIB) value. */
12255 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12256 {
12257 case 0: u64EffAddr = pCtx->rax; break;
12258 case 1: u64EffAddr = pCtx->rcx; break;
12259 case 2: u64EffAddr = pCtx->rdx; break;
12260 case 3: u64EffAddr = pCtx->rbx; break;
12261 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12262 case 6: u64EffAddr = pCtx->rsi; break;
12263 case 7: u64EffAddr = pCtx->rdi; break;
12264 case 8: u64EffAddr = pCtx->r8; break;
12265 case 9: u64EffAddr = pCtx->r9; break;
12266 case 10: u64EffAddr = pCtx->r10; break;
12267 case 11: u64EffAddr = pCtx->r11; break;
12268 case 13: u64EffAddr = pCtx->r13; break;
12269 case 14: u64EffAddr = pCtx->r14; break;
12270 case 15: u64EffAddr = pCtx->r15; break;
12271 /* SIB */
12272 case 4:
12273 case 12:
12274 {
12275 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12276
12277 /* Get the index and scale it. */
12278 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12279 {
12280 case 0: u64EffAddr = pCtx->rax; break;
12281 case 1: u64EffAddr = pCtx->rcx; break;
12282 case 2: u64EffAddr = pCtx->rdx; break;
12283 case 3: u64EffAddr = pCtx->rbx; break;
12284 case 4: u64EffAddr = 0; /*none */ break;
12285 case 5: u64EffAddr = pCtx->rbp; break;
12286 case 6: u64EffAddr = pCtx->rsi; break;
12287 case 7: u64EffAddr = pCtx->rdi; break;
12288 case 8: u64EffAddr = pCtx->r8; break;
12289 case 9: u64EffAddr = pCtx->r9; break;
12290 case 10: u64EffAddr = pCtx->r10; break;
12291 case 11: u64EffAddr = pCtx->r11; break;
12292 case 12: u64EffAddr = pCtx->r12; break;
12293 case 13: u64EffAddr = pCtx->r13; break;
12294 case 14: u64EffAddr = pCtx->r14; break;
12295 case 15: u64EffAddr = pCtx->r15; break;
12296 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12297 }
12298 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12299
12300 /* add base */
12301 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12302 {
12303 case 0: u64EffAddr += pCtx->rax; break;
12304 case 1: u64EffAddr += pCtx->rcx; break;
12305 case 2: u64EffAddr += pCtx->rdx; break;
12306 case 3: u64EffAddr += pCtx->rbx; break;
12307 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
12308 case 6: u64EffAddr += pCtx->rsi; break;
12309 case 7: u64EffAddr += pCtx->rdi; break;
12310 case 8: u64EffAddr += pCtx->r8; break;
12311 case 9: u64EffAddr += pCtx->r9; break;
12312 case 10: u64EffAddr += pCtx->r10; break;
12313 case 11: u64EffAddr += pCtx->r11; break;
12314 case 12: u64EffAddr += pCtx->r12; break;
12315 case 14: u64EffAddr += pCtx->r14; break;
12316 case 15: u64EffAddr += pCtx->r15; break;
12317 /* complicated encodings */
12318 case 5:
12319 case 13:
12320 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12321 {
12322 if (!pVCpu->iem.s.uRexB)
12323 {
12324 u64EffAddr += pCtx->rbp;
12325 SET_SS_DEF();
12326 }
12327 else
12328 u64EffAddr += pCtx->r13;
12329 }
12330 else
12331 {
12332 uint32_t u32Disp;
12333 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12334 u64EffAddr += (int32_t)u32Disp;
12335 }
12336 break;
12337 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12338 }
12339 break;
12340 }
12341 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12342 }
12343
12344 /* Get and add the displacement. */
12345 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12346 {
12347 case 0:
12348 break;
12349 case 1:
12350 {
12351 int8_t i8Disp;
12352 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12353 u64EffAddr += i8Disp;
12354 break;
12355 }
12356 case 2:
12357 {
12358 uint32_t u32Disp;
12359 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12360 u64EffAddr += (int32_t)u32Disp;
12361 break;
12362 }
12363 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12364 }
12365
12366 }
12367
12368 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12369 *pGCPtrEff = u64EffAddr;
12370 else
12371 {
12372 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12373 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12374 }
12375 }
12376
12377 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
12378 return VINF_SUCCESS;
12379}
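/*
 * Worked example (added for illustration, values chosen arbitrarily): in
 * 32-bit addressing mode an operand such as [ebx+esi*4+0x10] is encoded with
 * mod=01, rm=100 (a SIB byte follows), bSib = 0xB3 (scale field 2 i.e. *4,
 * index = ESI, base = EBX) and a disp8 of 0x10.  The code above then takes
 * the rm==4 path, loads ESI, shifts it left by 2, adds EBX, and finally adds
 * the sign-extended disp8, yielding *pGCPtrEff = ebx + esi*4 + 0x10 with the
 * default DS segment (SS is only implied when the base register is ESP/EBP).
 */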
12380
12381
12382/**
12383 * Calculates the effective address of a ModR/M memory operand.
12384 *
12385 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12386 *
12387 * @return Strict VBox status code.
12388 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12389 * @param bRm The ModRM byte.
12390 * @param cbImm The size of any immediate following the
12391 * effective address opcode bytes. Important for
12392 * RIP relative addressing.
12393 * @param pGCPtrEff Where to return the effective address.
12394 * @param offRsp RSP displacement.
12395 */
12396IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
12397{
12398 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12399 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12400# define SET_SS_DEF() \
12401 do \
12402 { \
12403 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12404 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12405 } while (0)
12406
12407 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12408 {
12409/** @todo Check the effective address size crap! */
12410 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12411 {
12412 uint16_t u16EffAddr;
12413
12414 /* Handle the disp16 form with no registers first. */
12415 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12416 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12417 else
12418 {
12419 /* Get the displacement. */
12420 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12421 {
12422 case 0: u16EffAddr = 0; break;
12423 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12424 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12425 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12426 }
12427
12428 /* Add the base and index registers to the disp. */
12429 switch (bRm & X86_MODRM_RM_MASK)
12430 {
12431 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12432 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12433 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12434 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12435 case 4: u16EffAddr += pCtx->si; break;
12436 case 5: u16EffAddr += pCtx->di; break;
12437 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12438 case 7: u16EffAddr += pCtx->bx; break;
12439 }
12440 }
12441
12442 *pGCPtrEff = u16EffAddr;
12443 }
12444 else
12445 {
12446 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12447 uint32_t u32EffAddr;
12448
12449 /* Handle the disp32 form with no registers first. */
12450 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12451 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12452 else
12453 {
12454 /* Get the register (or SIB) value. */
12455 switch ((bRm & X86_MODRM_RM_MASK))
12456 {
12457 case 0: u32EffAddr = pCtx->eax; break;
12458 case 1: u32EffAddr = pCtx->ecx; break;
12459 case 2: u32EffAddr = pCtx->edx; break;
12460 case 3: u32EffAddr = pCtx->ebx; break;
12461 case 4: /* SIB */
12462 {
12463 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12464
12465 /* Get the index and scale it. */
12466 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12467 {
12468 case 0: u32EffAddr = pCtx->eax; break;
12469 case 1: u32EffAddr = pCtx->ecx; break;
12470 case 2: u32EffAddr = pCtx->edx; break;
12471 case 3: u32EffAddr = pCtx->ebx; break;
12472 case 4: u32EffAddr = 0; /*none */ break;
12473 case 5: u32EffAddr = pCtx->ebp; break;
12474 case 6: u32EffAddr = pCtx->esi; break;
12475 case 7: u32EffAddr = pCtx->edi; break;
12476 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12477 }
12478 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12479
12480 /* add base */
12481 switch (bSib & X86_SIB_BASE_MASK)
12482 {
12483 case 0: u32EffAddr += pCtx->eax; break;
12484 case 1: u32EffAddr += pCtx->ecx; break;
12485 case 2: u32EffAddr += pCtx->edx; break;
12486 case 3: u32EffAddr += pCtx->ebx; break;
12487 case 4:
12488 u32EffAddr += pCtx->esp + offRsp;
12489 SET_SS_DEF();
12490 break;
12491 case 5:
12492 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12493 {
12494 u32EffAddr += pCtx->ebp;
12495 SET_SS_DEF();
12496 }
12497 else
12498 {
12499 uint32_t u32Disp;
12500 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12501 u32EffAddr += u32Disp;
12502 }
12503 break;
12504 case 6: u32EffAddr += pCtx->esi; break;
12505 case 7: u32EffAddr += pCtx->edi; break;
12506 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12507 }
12508 break;
12509 }
12510 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12511 case 6: u32EffAddr = pCtx->esi; break;
12512 case 7: u32EffAddr = pCtx->edi; break;
12513 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12514 }
12515
12516 /* Get and add the displacement. */
12517 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12518 {
12519 case 0:
12520 break;
12521 case 1:
12522 {
12523 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12524 u32EffAddr += i8Disp;
12525 break;
12526 }
12527 case 2:
12528 {
12529 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12530 u32EffAddr += u32Disp;
12531 break;
12532 }
12533 default:
12534 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12535 }
12536
12537 }
12538 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12539 *pGCPtrEff = u32EffAddr;
12540 else
12541 {
12542 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12543 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12544 }
12545 }
12546 }
12547 else
12548 {
12549 uint64_t u64EffAddr;
12550
12551 /* Handle the rip+disp32 form with no registers first. */
12552 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12553 {
12554 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12555 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12556 }
12557 else
12558 {
12559 /* Get the register (or SIB) value. */
12560 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12561 {
12562 case 0: u64EffAddr = pCtx->rax; break;
12563 case 1: u64EffAddr = pCtx->rcx; break;
12564 case 2: u64EffAddr = pCtx->rdx; break;
12565 case 3: u64EffAddr = pCtx->rbx; break;
12566 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12567 case 6: u64EffAddr = pCtx->rsi; break;
12568 case 7: u64EffAddr = pCtx->rdi; break;
12569 case 8: u64EffAddr = pCtx->r8; break;
12570 case 9: u64EffAddr = pCtx->r9; break;
12571 case 10: u64EffAddr = pCtx->r10; break;
12572 case 11: u64EffAddr = pCtx->r11; break;
12573 case 13: u64EffAddr = pCtx->r13; break;
12574 case 14: u64EffAddr = pCtx->r14; break;
12575 case 15: u64EffAddr = pCtx->r15; break;
12576 /* SIB */
12577 case 4:
12578 case 12:
12579 {
12580 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12581
12582 /* Get the index and scale it. */
12583 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12584 {
12585 case 0: u64EffAddr = pCtx->rax; break;
12586 case 1: u64EffAddr = pCtx->rcx; break;
12587 case 2: u64EffAddr = pCtx->rdx; break;
12588 case 3: u64EffAddr = pCtx->rbx; break;
12589 case 4: u64EffAddr = 0; /*none */ break;
12590 case 5: u64EffAddr = pCtx->rbp; break;
12591 case 6: u64EffAddr = pCtx->rsi; break;
12592 case 7: u64EffAddr = pCtx->rdi; break;
12593 case 8: u64EffAddr = pCtx->r8; break;
12594 case 9: u64EffAddr = pCtx->r9; break;
12595 case 10: u64EffAddr = pCtx->r10; break;
12596 case 11: u64EffAddr = pCtx->r11; break;
12597 case 12: u64EffAddr = pCtx->r12; break;
12598 case 13: u64EffAddr = pCtx->r13; break;
12599 case 14: u64EffAddr = pCtx->r14; break;
12600 case 15: u64EffAddr = pCtx->r15; break;
12601 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12602 }
12603 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12604
12605 /* add base */
12606 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12607 {
12608 case 0: u64EffAddr += pCtx->rax; break;
12609 case 1: u64EffAddr += pCtx->rcx; break;
12610 case 2: u64EffAddr += pCtx->rdx; break;
12611 case 3: u64EffAddr += pCtx->rbx; break;
12612 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
12613 case 6: u64EffAddr += pCtx->rsi; break;
12614 case 7: u64EffAddr += pCtx->rdi; break;
12615 case 8: u64EffAddr += pCtx->r8; break;
12616 case 9: u64EffAddr += pCtx->r9; break;
12617 case 10: u64EffAddr += pCtx->r10; break;
12618 case 11: u64EffAddr += pCtx->r11; break;
12619 case 12: u64EffAddr += pCtx->r12; break;
12620 case 14: u64EffAddr += pCtx->r14; break;
12621 case 15: u64EffAddr += pCtx->r15; break;
12622 /* complicated encodings */
12623 case 5:
12624 case 13:
12625 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12626 {
12627 if (!pVCpu->iem.s.uRexB)
12628 {
12629 u64EffAddr += pCtx->rbp;
12630 SET_SS_DEF();
12631 }
12632 else
12633 u64EffAddr += pCtx->r13;
12634 }
12635 else
12636 {
12637 uint32_t u32Disp;
12638 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12639 u64EffAddr += (int32_t)u32Disp;
12640 }
12641 break;
12642 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12643 }
12644 break;
12645 }
12646 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12647 }
12648
12649 /* Get and add the displacement. */
12650 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12651 {
12652 case 0:
12653 break;
12654 case 1:
12655 {
12656 int8_t i8Disp;
12657 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12658 u64EffAddr += i8Disp;
12659 break;
12660 }
12661 case 2:
12662 {
12663 uint32_t u32Disp;
12664 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12665 u64EffAddr += (int32_t)u32Disp;
12666 break;
12667 }
12668 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12669 }
12670
12671 }
12672
12673 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12674 *pGCPtrEff = u64EffAddr;
12675 else
12676 {
12677 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12678 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12679 }
12680 }
12681
12682 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
12683 return VINF_SUCCESS;
12684}
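/*
 * Note on the offRsp parameter: the only difference from
 * iemOpHlpCalcRmEffAddr above is that operands using xSP as the SIB base
 * register get offRsp added in (see the SIB base == 4 cases).  Presumably
 * this exists for instructions where the architecture defines the memory
 * operand to be evaluated with an already adjusted stack pointer, such as a
 * POP to a memory operand that uses xSP as its base register.
 */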
12685
12686
12687#ifdef IEM_WITH_SETJMP
12688/**
12689 * Calculates the effective address of a ModR/M memory operand.
12690 *
12691 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12692 *
12693 * May longjmp on internal error.
12694 *
12695 * @return The effective address.
12696 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12697 * @param bRm The ModRM byte.
12698 * @param cbImm The size of any immediate following the
12699 * effective address opcode bytes. Important for
12700 * RIP relative addressing.
12701 */
12702IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
12703{
12704 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
12705 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12706# define SET_SS_DEF() \
12707 do \
12708 { \
12709 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12710 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12711 } while (0)
12712
12713 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12714 {
12715/** @todo Check the effective address size crap! */
12716 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12717 {
12718 uint16_t u16EffAddr;
12719
12720 /* Handle the disp16 form with no registers first. */
12721 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12722 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12723 else
12724 {
12725 /* Get the displacement. */
12726 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12727 {
12728 case 0: u16EffAddr = 0; break;
12729 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12730 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12731 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
12732 }
12733
12734 /* Add the base and index registers to the disp. */
12735 switch (bRm & X86_MODRM_RM_MASK)
12736 {
12737 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12738 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12739 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12740 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12741 case 4: u16EffAddr += pCtx->si; break;
12742 case 5: u16EffAddr += pCtx->di; break;
12743 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12744 case 7: u16EffAddr += pCtx->bx; break;
12745 }
12746 }
12747
12748 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
12749 return u16EffAddr;
12750 }
12751
12752 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12753 uint32_t u32EffAddr;
12754
12755 /* Handle the disp32 form with no registers first. */
12756 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12757 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12758 else
12759 {
12760 /* Get the register (or SIB) value. */
12761 switch ((bRm & X86_MODRM_RM_MASK))
12762 {
12763 case 0: u32EffAddr = pCtx->eax; break;
12764 case 1: u32EffAddr = pCtx->ecx; break;
12765 case 2: u32EffAddr = pCtx->edx; break;
12766 case 3: u32EffAddr = pCtx->ebx; break;
12767 case 4: /* SIB */
12768 {
12769 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12770
12771 /* Get the index and scale it. */
12772 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12773 {
12774 case 0: u32EffAddr = pCtx->eax; break;
12775 case 1: u32EffAddr = pCtx->ecx; break;
12776 case 2: u32EffAddr = pCtx->edx; break;
12777 case 3: u32EffAddr = pCtx->ebx; break;
12778 case 4: u32EffAddr = 0; /*none */ break;
12779 case 5: u32EffAddr = pCtx->ebp; break;
12780 case 6: u32EffAddr = pCtx->esi; break;
12781 case 7: u32EffAddr = pCtx->edi; break;
12782 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12783 }
12784 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12785
12786 /* add base */
12787 switch (bSib & X86_SIB_BASE_MASK)
12788 {
12789 case 0: u32EffAddr += pCtx->eax; break;
12790 case 1: u32EffAddr += pCtx->ecx; break;
12791 case 2: u32EffAddr += pCtx->edx; break;
12792 case 3: u32EffAddr += pCtx->ebx; break;
12793 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12794 case 5:
12795 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12796 {
12797 u32EffAddr += pCtx->ebp;
12798 SET_SS_DEF();
12799 }
12800 else
12801 {
12802 uint32_t u32Disp;
12803 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12804 u32EffAddr += u32Disp;
12805 }
12806 break;
12807 case 6: u32EffAddr += pCtx->esi; break;
12808 case 7: u32EffAddr += pCtx->edi; break;
12809 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12810 }
12811 break;
12812 }
12813 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12814 case 6: u32EffAddr = pCtx->esi; break;
12815 case 7: u32EffAddr = pCtx->edi; break;
12816 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12817 }
12818
12819 /* Get and add the displacement. */
12820 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12821 {
12822 case 0:
12823 break;
12824 case 1:
12825 {
12826 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12827 u32EffAddr += i8Disp;
12828 break;
12829 }
12830 case 2:
12831 {
12832 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12833 u32EffAddr += u32Disp;
12834 break;
12835 }
12836 default:
12837 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
12838 }
12839 }
12840
12841 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12842 {
12843 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
12844 return u32EffAddr;
12845 }
12846 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12847 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
12848 return u32EffAddr & UINT16_MAX;
12849 }
12850
12851 uint64_t u64EffAddr;
12852
12853 /* Handle the rip+disp32 form with no registers first. */
12854 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12855 {
12856 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12857 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12858 }
12859 else
12860 {
12861 /* Get the register (or SIB) value. */
12862 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12863 {
12864 case 0: u64EffAddr = pCtx->rax; break;
12865 case 1: u64EffAddr = pCtx->rcx; break;
12866 case 2: u64EffAddr = pCtx->rdx; break;
12867 case 3: u64EffAddr = pCtx->rbx; break;
12868 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12869 case 6: u64EffAddr = pCtx->rsi; break;
12870 case 7: u64EffAddr = pCtx->rdi; break;
12871 case 8: u64EffAddr = pCtx->r8; break;
12872 case 9: u64EffAddr = pCtx->r9; break;
12873 case 10: u64EffAddr = pCtx->r10; break;
12874 case 11: u64EffAddr = pCtx->r11; break;
12875 case 13: u64EffAddr = pCtx->r13; break;
12876 case 14: u64EffAddr = pCtx->r14; break;
12877 case 15: u64EffAddr = pCtx->r15; break;
12878 /* SIB */
12879 case 4:
12880 case 12:
12881 {
12882 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12883
12884 /* Get the index and scale it. */
12885 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12886 {
12887 case 0: u64EffAddr = pCtx->rax; break;
12888 case 1: u64EffAddr = pCtx->rcx; break;
12889 case 2: u64EffAddr = pCtx->rdx; break;
12890 case 3: u64EffAddr = pCtx->rbx; break;
12891 case 4: u64EffAddr = 0; /*none */ break;
12892 case 5: u64EffAddr = pCtx->rbp; break;
12893 case 6: u64EffAddr = pCtx->rsi; break;
12894 case 7: u64EffAddr = pCtx->rdi; break;
12895 case 8: u64EffAddr = pCtx->r8; break;
12896 case 9: u64EffAddr = pCtx->r9; break;
12897 case 10: u64EffAddr = pCtx->r10; break;
12898 case 11: u64EffAddr = pCtx->r11; break;
12899 case 12: u64EffAddr = pCtx->r12; break;
12900 case 13: u64EffAddr = pCtx->r13; break;
12901 case 14: u64EffAddr = pCtx->r14; break;
12902 case 15: u64EffAddr = pCtx->r15; break;
12903 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12904 }
12905 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12906
12907 /* add base */
12908 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12909 {
12910 case 0: u64EffAddr += pCtx->rax; break;
12911 case 1: u64EffAddr += pCtx->rcx; break;
12912 case 2: u64EffAddr += pCtx->rdx; break;
12913 case 3: u64EffAddr += pCtx->rbx; break;
12914 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
12915 case 6: u64EffAddr += pCtx->rsi; break;
12916 case 7: u64EffAddr += pCtx->rdi; break;
12917 case 8: u64EffAddr += pCtx->r8; break;
12918 case 9: u64EffAddr += pCtx->r9; break;
12919 case 10: u64EffAddr += pCtx->r10; break;
12920 case 11: u64EffAddr += pCtx->r11; break;
12921 case 12: u64EffAddr += pCtx->r12; break;
12922 case 14: u64EffAddr += pCtx->r14; break;
12923 case 15: u64EffAddr += pCtx->r15; break;
12924 /* complicated encodings */
12925 case 5:
12926 case 13:
12927 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12928 {
12929 if (!pVCpu->iem.s.uRexB)
12930 {
12931 u64EffAddr += pCtx->rbp;
12932 SET_SS_DEF();
12933 }
12934 else
12935 u64EffAddr += pCtx->r13;
12936 }
12937 else
12938 {
12939 uint32_t u32Disp;
12940 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12941 u64EffAddr += (int32_t)u32Disp;
12942 }
12943 break;
12944 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12945 }
12946 break;
12947 }
12948 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12949 }
12950
12951 /* Get and add the displacement. */
12952 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12953 {
12954 case 0:
12955 break;
12956 case 1:
12957 {
12958 int8_t i8Disp;
12959 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12960 u64EffAddr += i8Disp;
12961 break;
12962 }
12963 case 2:
12964 {
12965 uint32_t u32Disp;
12966 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12967 u64EffAddr += (int32_t)u32Disp;
12968 break;
12969 }
12970 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
12971 }
12972
12973 }
12974
12975 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12976 {
12977 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
12978 return u64EffAddr;
12979 }
12980 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12981 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
12982 return u64EffAddr & UINT32_MAX;
12983}
12984#endif /* IEM_WITH_SETJMP */
12985
12986
12987/** @} */
12988
12989
12990
12991/*
12992 * Include the instructions
12993 */
12994#include "IEMAllInstructions.cpp.h"
12995
12996
12997
12998
12999#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13000
13001/**
13002 * Sets up execution verification mode.
13003 */
13004IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
13005{
13007 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
13008
13009 /*
13010 * Always note down the address of the current instruction.
13011 */
13012 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
13013 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
13014
13015 /*
13016 * Enable verification and/or logging.
13017 */
13018 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
13019 if ( fNewNoRem
13020 && ( 0
13021#if 0 /* auto enable on first paged protected mode interrupt */
13022 || ( pOrgCtx->eflags.Bits.u1IF
13023 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
13024 && TRPMHasTrap(pVCpu)
13025 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
13026#endif
13027#if 0
13028 || ( pOrgCtx->cs == 0x10
13029 && ( pOrgCtx->rip == 0x90119e3e
13030 || pOrgCtx->rip == 0x901d9810))
13031#endif
13032#if 0 /* Auto enable DSL - FPU stuff. */
13033 || ( pOrgCtx->cs == 0x10
13034 && (// pOrgCtx->rip == 0xc02ec07f
13035 //|| pOrgCtx->rip == 0xc02ec082
13036 //|| pOrgCtx->rip == 0xc02ec0c9
13037 0
13038 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
13039#endif
13040#if 0 /* Auto enable DSL - fstp st0 stuff. */
13041 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
13042#endif
13043#if 0
13044 || pOrgCtx->rip == 0x9022bb3a
13045#endif
13046#if 0
13047 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
13048#endif
13049#if 0
13050 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
13051 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
13052#endif
13053#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
13054 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
13055 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
13056 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
13057#endif
13058#if 0 /* NT4SP1 - xadd early boot. */
13059 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
13060#endif
13061#if 0 /* NT4SP1 - wrmsr (intel MSR). */
13062 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
13063#endif
13064#if 0 /* NT4SP1 - cmpxchg (AMD). */
13065 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
13066#endif
13067#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
13068 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
13069#endif
13070#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
13071 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
13072
13073#endif
13074#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
13075 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
13076
13077#endif
13078#if 0 /* NT4SP1 - frstor [ecx] */
13079 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
13080#endif
13081#if 0 /* xxxxxx - All long mode code. */
13082 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
13083#endif
13084#if 0 /* rep movsq linux 3.7 64-bit boot. */
13085 || (pOrgCtx->rip == 0x0000000000100241)
13086#endif
13087#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
13088 || (pOrgCtx->rip == 0x000000000215e240)
13089#endif
13090#if 0 /* DOS's size-overridden iret to v8086. */
13091 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
13092#endif
13093 )
13094 )
13095 {
13096 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
13097 RTLogFlags(NULL, "enabled");
13098 fNewNoRem = false;
13099 }
13100 if (fNewNoRem != pVCpu->iem.s.fNoRem)
13101 {
13102 pVCpu->iem.s.fNoRem = fNewNoRem;
13103 if (!fNewNoRem)
13104 {
13105 LogAlways(("Enabling verification mode!\n"));
13106 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
13107 }
13108 else
13109 LogAlways(("Disabling verification mode!\n"));
13110 }
13111
13112 /*
13113 * Switch state.
13114 */
13115 if (IEM_VERIFICATION_ENABLED(pVCpu))
13116 {
13117 static CPUMCTX s_DebugCtx; /* Ugly! */
13118
13119 s_DebugCtx = *pOrgCtx;
13120 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
13121 }
13122
13123 /*
13124 * See if there is an interrupt pending in TRPM and inject it if we can.
13125 */
13126 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13127 if ( pOrgCtx->eflags.Bits.u1IF
13128 && TRPMHasTrap(pVCpu)
13129 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
13130 {
13131 uint8_t u8TrapNo;
13132 TRPMEVENT enmType;
13133 RTGCUINT uErrCode;
13134 RTGCPTR uCr2;
13135 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13136 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13137 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13138 TRPMResetTrap(pVCpu);
13139 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
13140 }
13141
13142 /*
13143 * Reset the counters.
13144 */
13145 pVCpu->iem.s.cIOReads = 0;
13146 pVCpu->iem.s.cIOWrites = 0;
13147 pVCpu->iem.s.fIgnoreRaxRdx = false;
13148 pVCpu->iem.s.fOverlappingMovs = false;
13149 pVCpu->iem.s.fProblematicMemory = false;
13150 pVCpu->iem.s.fUndefinedEFlags = 0;
13151
13152 if (IEM_VERIFICATION_ENABLED(pVCpu))
13153 {
13154 /*
13155 * Free all verification records.
13156 */
13157 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
13158 pVCpu->iem.s.pIemEvtRecHead = NULL;
13159 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
13160 do
13161 {
13162 while (pEvtRec)
13163 {
13164 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
13165 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
13166 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
13167 pEvtRec = pNext;
13168 }
13169 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
13170 pVCpu->iem.s.pOtherEvtRecHead = NULL;
13171 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
13172 } while (pEvtRec);
13173 }
13174}
13175
13176
13177/**
13178 * Allocate an event record.
13179 * @returns Pointer to a record.
13180 */
13181IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
13182{
13183 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13184 return NULL;
13185
13186 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
13187 if (pEvtRec)
13188 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
13189 else
13190 {
13191 if (!pVCpu->iem.s.ppIemEvtRecNext)
13192 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
13193
13194 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
13195 if (!pEvtRec)
13196 return NULL;
13197 }
13198 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
13199 pEvtRec->pNext = NULL;
13200 return pEvtRec;
13201}
13202
13203
13204/**
13205 * IOMMMIORead notification.
13206 */
13207VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
13208{
13209 PVMCPU pVCpu = VMMGetCpu(pVM);
13210 if (!pVCpu)
13211 return;
13212 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13213 if (!pEvtRec)
13214 return;
13215 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
13216 pEvtRec->u.RamRead.GCPhys = GCPhys;
13217 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
13218 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13219 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13220}
13221
13222
13223/**
13224 * IOMMMIOWrite notification.
13225 */
13226VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
13227{
13228 PVMCPU pVCpu = VMMGetCpu(pVM);
13229 if (!pVCpu)
13230 return;
13231 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13232 if (!pEvtRec)
13233 return;
13234 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
13235 pEvtRec->u.RamWrite.GCPhys = GCPhys;
13236 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
13237 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
13238 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
13239 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
13240 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
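    /* Note: RT_BYTE1..RT_BYTE4 extract the value's bytes from least to most
       significant, so u32Value=0x11223344 is recorded as
       ab[] = { 0x44, 0x33, 0x22, 0x11 }, i.e. in guest little-endian order. */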
13241 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13242 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13243}
13244
13245
13246/**
13247 * IOMIOPortRead notification.
13248 */
13249VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
13250{
13251 PVMCPU pVCpu = VMMGetCpu(pVM);
13252 if (!pVCpu)
13253 return;
13254 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13255 if (!pEvtRec)
13256 return;
13257 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
13258 pEvtRec->u.IOPortRead.Port = Port;
13259 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
13260 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13261 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13262}
13263
13264/**
13265 * IOMIOPortWrite notification.
13266 */
13267VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13268{
13269 PVMCPU pVCpu = VMMGetCpu(pVM);
13270 if (!pVCpu)
13271 return;
13272 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13273 if (!pEvtRec)
13274 return;
13275 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
13276 pEvtRec->u.IOPortWrite.Port = Port;
13277 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
13278 pEvtRec->u.IOPortWrite.u32Value = u32Value;
13279 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13280 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13281}
13282
13283
13284VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
13285{
13286 PVMCPU pVCpu = VMMGetCpu(pVM);
13287 if (!pVCpu)
13288 return;
13289 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13290 if (!pEvtRec)
13291 return;
13292 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
13293 pEvtRec->u.IOPortStrRead.Port = Port;
13294 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
13295 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
13296 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13297 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13298}
13299
13300
13301VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
13302{
13303 PVMCPU pVCpu = VMMGetCpu(pVM);
13304 if (!pVCpu)
13305 return;
13306 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13307 if (!pEvtRec)
13308 return;
13309 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
13310 pEvtRec->u.IOPortStrWrite.Port = Port;
13311 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
13312 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
13313 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13314 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13315}
13316
13317
13318/**
13319 * Fakes and records an I/O port read.
13320 *
13321 * @returns VINF_SUCCESS.
13322 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13323 * @param Port The I/O port.
13324 * @param pu32Value Where to store the fake value.
13325 * @param cbValue The size of the access.
13326 */
13327IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
13328{
13329 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13330 if (pEvtRec)
13331 {
13332 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
13333 pEvtRec->u.IOPortRead.Port = Port;
13334 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
13335 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
13336 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
13337 }
13338 pVCpu->iem.s.cIOReads++;
13339 *pu32Value = 0xcccccccc;
13340 return VINF_SUCCESS;
13341}
13342
13343
13344/**
13345 * Fakes and records an I/O port write.
13346 *
13347 * @returns VINF_SUCCESS.
13348 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13349 * @param Port The I/O port.
13350 * @param u32Value The value being written.
13351 * @param cbValue The size of the access.
13352 */
13353IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13354{
13355 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13356 if (pEvtRec)
13357 {
13358 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
13359 pEvtRec->u.IOPortWrite.Port = Port;
13360 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
13361 pEvtRec->u.IOPortWrite.u32Value = u32Value;
13362 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
13363 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
13364 }
13365 pVCpu->iem.s.cIOWrites++;
13366 return VINF_SUCCESS;
13367}
13368
13369
13370/**
13371 * Used to add extra details about a stub case.
13372 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13373 */
13374IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
13375{
13376 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13377 PVM pVM = pVCpu->CTX_SUFF(pVM);
13379 char szRegs[4096];
13380 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
13381 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
13382 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
13383 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
13384 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
13385 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
13386 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
13387 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
13388 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
13389 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
13390 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
13391 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
13392 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
13393 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
13394 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
13395 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
13396 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
13397 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
13398 " efer=%016VR{efer}\n"
13399 " pat=%016VR{pat}\n"
13400 " sf_mask=%016VR{sf_mask}\n"
13401 "krnl_gs_base=%016VR{krnl_gs_base}\n"
13402 " lstar=%016VR{lstar}\n"
13403 " star=%016VR{star} cstar=%016VR{cstar}\n"
13404 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
13405 );
13406
13407 char szInstr1[256];
13408 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
13409 DBGF_DISAS_FLAGS_DEFAULT_MODE,
13410 szInstr1, sizeof(szInstr1), NULL);
13411 char szInstr2[256];
13412 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
13413 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13414 szInstr2, sizeof(szInstr2), NULL);
13415
13416 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
13417}
13418
13419
13420/**
13421 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
13422 * dump to the assertion info.
13423 *
13424 * @param pEvtRec The record to dump.
13425 */
13426IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
13427{
13428 switch (pEvtRec->enmEvent)
13429 {
13430 case IEMVERIFYEVENT_IOPORT_READ:
13431 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
13432 pEvtRec->u.IOPortRead.Port,
13433 pEvtRec->u.IOPortRead.cbValue);
13434 break;
13435 case IEMVERIFYEVENT_IOPORT_WRITE:
13436 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
13437 pEvtRec->u.IOPortWrite.Port,
13438 pEvtRec->u.IOPortWrite.cbValue,
13439 pEvtRec->u.IOPortWrite.u32Value);
13440 break;
13441 case IEMVERIFYEVENT_IOPORT_STR_READ:
13442 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
13443 pEvtRec->u.IOPortStrRead.Port,
13444 pEvtRec->u.IOPortStrRead.cbValue,
13445 pEvtRec->u.IOPortStrRead.cTransfers);
13446 break;
13447 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13448 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
13449 pEvtRec->u.IOPortStrWrite.Port,
13450 pEvtRec->u.IOPortStrWrite.cbValue,
13451 pEvtRec->u.IOPortStrWrite.cTransfers);
13452 break;
13453 case IEMVERIFYEVENT_RAM_READ:
13454 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
13455 pEvtRec->u.RamRead.GCPhys,
13456 pEvtRec->u.RamRead.cb);
13457 break;
13458 case IEMVERIFYEVENT_RAM_WRITE:
13459 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
13460 pEvtRec->u.RamWrite.GCPhys,
13461 pEvtRec->u.RamWrite.cb,
13462 (int)pEvtRec->u.RamWrite.cb,
13463 pEvtRec->u.RamWrite.ab);
13464 break;
13465 default:
13466 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
13467 break;
13468 }
13469}
13470
13471
13472/**
13473 * Raises an assertion on the specified records, showing the given message with
13474 * dumps of both records attached.
13475 *
13476 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13477 * @param pEvtRec1 The first record.
13478 * @param pEvtRec2 The second record.
13479 * @param pszMsg The message explaining why we're asserting.
13480 */
13481IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
13482{
13483 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13484 iemVerifyAssertAddRecordDump(pEvtRec1);
13485 iemVerifyAssertAddRecordDump(pEvtRec2);
13486 iemVerifyAssertMsg2(pVCpu);
13487 RTAssertPanic();
13488}
13489
13490
13491/**
13492 * Raises an assertion on the specified record, showing the given message with
13493 * a record dump attached.
13494 *
13495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13496 * @param pEvtRec1 The first record.
13497 * @param pszMsg The message explaining why we're asserting.
13498 */
13499IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
13500{
13501 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13502 iemVerifyAssertAddRecordDump(pEvtRec);
13503 iemVerifyAssertMsg2(pVCpu);
13504 RTAssertPanic();
13505}
13506
13507
13508/**
13509 * Verifies a write record.
13510 *
13511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13512 * @param pEvtRec The write record.
13513 * @param fRem Set if REM was doing the other execution. If clear
13514 * it was HM.
13515 */
13516IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
13517{
13518 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
13519 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
13520 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
13521 if ( RT_FAILURE(rc)
13522 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
13523 {
13524 /* fend off ins */
13525 if ( !pVCpu->iem.s.cIOReads
13526 || pEvtRec->u.RamWrite.ab[0] != 0xcc
13527 || ( pEvtRec->u.RamWrite.cb != 1
13528 && pEvtRec->u.RamWrite.cb != 2
13529 && pEvtRec->u.RamWrite.cb != 4) )
13530 {
13531 /* fend off ROMs and MMIO */
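            /* Note: the unsigned subtractions below are compact range checks:
               "GCPhys - 0xa0000 > 0x60000" is true exactly when GCPhys lies
               outside the legacy VGA/ROM window [0xa0000, 0x100000], and the
               second test likewise excludes the 256 KB ROM area right below
               4 GiB ([0xfffc0000, 0x100000000]). */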
13532 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
13533 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
13534 {
13535 /* fend off fxsave */
13536 if (pEvtRec->u.RamWrite.cb != 512)
13537 {
13538 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
13539 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13540 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
13541 RTAssertMsg2Add("%s: %.*Rhxs\n"
13542 "iem: %.*Rhxs\n",
13543 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
13544 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
13545 iemVerifyAssertAddRecordDump(pEvtRec);
13546 iemVerifyAssertMsg2(pVCpu);
13547 RTAssertPanic();
13548 }
13549 }
13550 }
13551 }
13552
13553}
13554
13555/**
13556 * Performs the post-execution verification checks.
13557 */
13558IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
13559{
13560 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13561 return rcStrictIem;
13562
13563 /*
13564 * Switch back the state.
13565 */
13566 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
13567 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
13568 Assert(pOrgCtx != pDebugCtx);
13569 IEM_GET_CTX(pVCpu) = pOrgCtx;
13570
13571 /*
13572 * Execute the instruction in REM.
13573 */
13574 bool fRem = false;
13575 PVM pVM = pVCpu->CTX_SUFF(pVM);
13577 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
13578#ifdef IEM_VERIFICATION_MODE_FULL_HM
13579 if ( HMIsEnabled(pVM)
13580 && pVCpu->iem.s.cIOReads == 0
13581 && pVCpu->iem.s.cIOWrites == 0
13582 && !pVCpu->iem.s.fProblematicMemory)
13583 {
13584 uint64_t uStartRip = pOrgCtx->rip;
13585 unsigned iLoops = 0;
13586 do
13587 {
13588 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
13589 iLoops++;
13590 } while ( rc == VINF_SUCCESS
13591 || ( rc == VINF_EM_DBG_STEPPED
13592 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13593 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
13594 || ( pOrgCtx->rip != pDebugCtx->rip
13595 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
13596 && iLoops < 8) );
13597 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
13598 rc = VINF_SUCCESS;
13599 }
13600#endif
13601 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
13602 || rc == VINF_IOM_R3_IOPORT_READ
13603 || rc == VINF_IOM_R3_IOPORT_WRITE
13604 || rc == VINF_IOM_R3_MMIO_READ
13605 || rc == VINF_IOM_R3_MMIO_READ_WRITE
13606 || rc == VINF_IOM_R3_MMIO_WRITE
13607 || rc == VINF_CPUM_R3_MSR_READ
13608 || rc == VINF_CPUM_R3_MSR_WRITE
13609 || rc == VINF_EM_RESCHEDULE
13610 )
13611 {
13612 EMRemLock(pVM);
13613 rc = REMR3EmulateInstruction(pVM, pVCpu);
13614 AssertRC(rc);
13615 EMRemUnlock(pVM);
13616 fRem = true;
13617 }
13618
13619# if 1 /* Skip unimplemented instructions for now. */
13620 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13621 {
13622 IEM_GET_CTX(pVCpu) = pOrgCtx;
13623 if (rc == VINF_EM_DBG_STEPPED)
13624 return VINF_SUCCESS;
13625 return rc;
13626 }
13627# endif
13628
13629 /*
13630 * Compare the register states.
13631 */
13632 unsigned cDiffs = 0;
13633 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
13634 {
13635 //Log(("REM and IEM ends up with different registers!\n"));
13636 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
13637
13638# define CHECK_FIELD(a_Field) \
13639 do \
13640 { \
13641 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13642 { \
13643 switch (sizeof(pOrgCtx->a_Field)) \
13644 { \
13645 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13646 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13647 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13648 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13649 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13650 } \
13651 cDiffs++; \
13652 } \
13653 } while (0)
13654# define CHECK_XSTATE_FIELD(a_Field) \
13655 do \
13656 { \
13657 if (pOrgXState->a_Field != pDebugXState->a_Field) \
13658 { \
13659 switch (sizeof(pOrgXState->a_Field)) \
13660 { \
13661 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13662 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13663 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13664 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13665 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13666 } \
13667 cDiffs++; \
13668 } \
13669 } while (0)
13670
13671# define CHECK_BIT_FIELD(a_Field) \
13672 do \
13673 { \
13674 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13675 { \
13676 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
13677 cDiffs++; \
13678 } \
13679 } while (0)
13680
13681# define CHECK_SEL(a_Sel) \
13682 do \
13683 { \
13684 CHECK_FIELD(a_Sel.Sel); \
13685 CHECK_FIELD(a_Sel.Attr.u); \
13686 CHECK_FIELD(a_Sel.u64Base); \
13687 CHECK_FIELD(a_Sel.u32Limit); \
13688 CHECK_FIELD(a_Sel.fFlags); \
13689 } while (0)
13690
13691 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
13692 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
13693
13694#if 1 /* The recompiler doesn't update these the intel way. */
13695 if (fRem)
13696 {
13697 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
13698 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
13699 pOrgXState->x87.CS = pDebugXState->x87.CS;
13700 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
13701 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
13702 pOrgXState->x87.DS = pDebugXState->x87.DS;
13703 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
13704 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
13705 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
13706 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
13707 }
13708#endif
13709 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
13710 {
13711 RTAssertMsg2Weak(" the FPU state differs\n");
13712 cDiffs++;
13713 CHECK_XSTATE_FIELD(x87.FCW);
13714 CHECK_XSTATE_FIELD(x87.FSW);
13715 CHECK_XSTATE_FIELD(x87.FTW);
13716 CHECK_XSTATE_FIELD(x87.FOP);
13717 CHECK_XSTATE_FIELD(x87.FPUIP);
13718 CHECK_XSTATE_FIELD(x87.CS);
13719 CHECK_XSTATE_FIELD(x87.Rsrvd1);
13720 CHECK_XSTATE_FIELD(x87.FPUDP);
13721 CHECK_XSTATE_FIELD(x87.DS);
13722 CHECK_XSTATE_FIELD(x87.Rsrvd2);
13723 CHECK_XSTATE_FIELD(x87.MXCSR);
13724 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
13725 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
13726 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
13727 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
13728 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
13729 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
13730 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
13731 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
13732 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
13733 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
13734 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
13735 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
13736 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
13737 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
13738 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
13739 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
13740 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
13741 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
13742 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
13743 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
13744 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
13745 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
13746 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
13747 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
13748 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
13749 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
13750 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
13751 }
13752 CHECK_FIELD(rip);
13753 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
13754 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
13755 {
13756 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
13757 CHECK_BIT_FIELD(rflags.Bits.u1CF);
13758 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
13759 CHECK_BIT_FIELD(rflags.Bits.u1PF);
13760 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
13761 CHECK_BIT_FIELD(rflags.Bits.u1AF);
13762 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
13763 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
13764 CHECK_BIT_FIELD(rflags.Bits.u1SF);
13765 CHECK_BIT_FIELD(rflags.Bits.u1TF);
13766 CHECK_BIT_FIELD(rflags.Bits.u1IF);
13767 CHECK_BIT_FIELD(rflags.Bits.u1DF);
13768 CHECK_BIT_FIELD(rflags.Bits.u1OF);
13769 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
13770 CHECK_BIT_FIELD(rflags.Bits.u1NT);
13771 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
13772 if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
13773 CHECK_BIT_FIELD(rflags.Bits.u1RF);
13774 CHECK_BIT_FIELD(rflags.Bits.u1VM);
13775 CHECK_BIT_FIELD(rflags.Bits.u1AC);
13776 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
13777 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
13778 CHECK_BIT_FIELD(rflags.Bits.u1ID);
13779 }
13780
13781 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
13782 CHECK_FIELD(rax);
13783 CHECK_FIELD(rcx);
13784 if (!pVCpu->iem.s.fIgnoreRaxRdx)
13785 CHECK_FIELD(rdx);
13786 CHECK_FIELD(rbx);
13787 CHECK_FIELD(rsp);
13788 CHECK_FIELD(rbp);
13789 CHECK_FIELD(rsi);
13790 CHECK_FIELD(rdi);
13791 CHECK_FIELD(r8);
13792 CHECK_FIELD(r9);
13793 CHECK_FIELD(r10);
13794 CHECK_FIELD(r11);
13795 CHECK_FIELD(r12);
13796 CHECK_FIELD(r13);
13797 CHECK_SEL(cs);
13798 CHECK_SEL(ss);
13799 CHECK_SEL(ds);
13800 CHECK_SEL(es);
13801 CHECK_SEL(fs);
13802 CHECK_SEL(gs);
13803 CHECK_FIELD(cr0);
13804
13805 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
13806 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
13807 /* Kludge #2: CR2 differs slightly on cross page boundary faults; we report the last address of the access
13808 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
13809 if (pOrgCtx->cr2 != pDebugCtx->cr2)
13810 {
13811 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
13812 { /* ignore */ }
13813 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
13814 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
13815 && fRem)
13816 { /* ignore */ }
13817 else
13818 CHECK_FIELD(cr2);
13819 }
13820 CHECK_FIELD(cr3);
13821 CHECK_FIELD(cr4);
13822 CHECK_FIELD(dr[0]);
13823 CHECK_FIELD(dr[1]);
13824 CHECK_FIELD(dr[2]);
13825 CHECK_FIELD(dr[3]);
13826 CHECK_FIELD(dr[6]);
13827 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
13828 CHECK_FIELD(dr[7]);
13829 CHECK_FIELD(gdtr.cbGdt);
13830 CHECK_FIELD(gdtr.pGdt);
13831 CHECK_FIELD(idtr.cbIdt);
13832 CHECK_FIELD(idtr.pIdt);
13833 CHECK_SEL(ldtr);
13834 CHECK_SEL(tr);
13835 CHECK_FIELD(SysEnter.cs);
13836 CHECK_FIELD(SysEnter.eip);
13837 CHECK_FIELD(SysEnter.esp);
13838 CHECK_FIELD(msrEFER);
13839 CHECK_FIELD(msrSTAR);
13840 CHECK_FIELD(msrPAT);
13841 CHECK_FIELD(msrLSTAR);
13842 CHECK_FIELD(msrCSTAR);
13843 CHECK_FIELD(msrSFMASK);
13844 CHECK_FIELD(msrKERNELGSBASE);
13845
13846 if (cDiffs != 0)
13847 {
13848 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13849 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
13850 RTAssertPanic();
13851 static bool volatile s_fEnterDebugger = true;
13852 if (s_fEnterDebugger)
13853 DBGFSTOP(pVM);
13854
13855# if 1 /* Ignore unimplemented instructions for now. */
13856 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13857 rcStrictIem = VINF_SUCCESS;
13858# endif
13859 }
13860# undef CHECK_FIELD
13861# undef CHECK_BIT_FIELD
13862 }
13863
13864 /*
13865 * If the register state compared fine, check the verification event
13866 * records.
13867 */
13868 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
13869 {
13870 /*
13871 * Compare verification event records.
13872 * - I/O port accesses should be a 1:1 match.
13873 */
13874 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
13875 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
13876 while (pIemRec && pOtherRec)
13877 {
13878 /* Since we might miss RAM writes and reads, ignore reads and verify
13879 that any extra IEM write records match what is actually in guest memory. */
13880 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
13881 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
13882 && pIemRec->pNext)
13883 {
13884 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13885 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13886 pIemRec = pIemRec->pNext;
13887 }
13888
13889 /* Do the compare. */
13890 if (pIemRec->enmEvent != pOtherRec->enmEvent)
13891 {
13892 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
13893 break;
13894 }
13895 bool fEquals;
13896 switch (pIemRec->enmEvent)
13897 {
13898 case IEMVERIFYEVENT_IOPORT_READ:
13899 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
13900 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
13901 break;
13902 case IEMVERIFYEVENT_IOPORT_WRITE:
13903 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
13904 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
13905 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
13906 break;
13907 case IEMVERIFYEVENT_IOPORT_STR_READ:
13908 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
13909 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
13910 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
13911 break;
13912 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13913 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
13914 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
13915 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
13916 break;
13917 case IEMVERIFYEVENT_RAM_READ:
13918 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
13919 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
13920 break;
13921 case IEMVERIFYEVENT_RAM_WRITE:
13922 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
13923 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
13924 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
13925 break;
13926 default:
13927 fEquals = false;
13928 break;
13929 }
13930 if (!fEquals)
13931 {
13932 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
13933 break;
13934 }
13935
13936 /* advance */
13937 pIemRec = pIemRec->pNext;
13938 pOtherRec = pOtherRec->pNext;
13939 }
13940
13941 /* Ignore extra writes and reads. */
13942 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
13943 {
13944 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13945 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13946 pIemRec = pIemRec->pNext;
13947 }
13948 if (pIemRec != NULL)
13949 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
13950 else if (pOtherRec != NULL)
13951 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
13952 }
13953 IEM_GET_CTX(pVCpu) = pOrgCtx;
13954
13955 return rcStrictIem;
13956}
13957
13958#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13959
13960/* stubs */
13961IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
13962{
13963 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
13964 return VERR_INTERNAL_ERROR;
13965}
13966
13967IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13968{
13969 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
13970 return VERR_INTERNAL_ERROR;
13971}
13972
13973#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13974
13975
13976#ifdef LOG_ENABLED
13977/**
13978 * Logs the current instruction.
13979 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13980 * @param pCtx The current CPU context.
13981 * @param fSameCtx Set if we have the same context information as the VMM,
13982 * clear if we may have already executed an instruction in
13983 * our debug context. When clear, we assume IEMCPU holds
13984 * valid CPU mode info.
13985 */
13986IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
13987{
13988# ifdef IN_RING3
13989 if (LogIs2Enabled())
13990 {
13991 char szInstr[256];
13992 uint32_t cbInstr = 0;
13993 if (fSameCtx)
13994 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13995 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13996 szInstr, sizeof(szInstr), &cbInstr);
13997 else
13998 {
13999 uint32_t fFlags = 0;
14000 switch (pVCpu->iem.s.enmCpuMode)
14001 {
14002 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
14003 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
14004 case IEMMODE_16BIT:
14005 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
14006 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
14007 else
14008 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
14009 break;
14010 }
14011 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
14012 szInstr, sizeof(szInstr), &cbInstr);
14013 }
14014
14015 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
14016 Log2(("****\n"
14017 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
14018 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
14019 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
14020 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
14021 " %s\n"
14022 ,
14023 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
14024 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
14025 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
14026 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
14027 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
14028 szInstr));
14029
14030 if (LogIs3Enabled())
14031 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14032 }
14033 else
14034# endif
14035 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
14036 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
14037 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
14038}
14039#endif
14040
14041
14042/**
14043 * Makes status code adjustments (pass up from I/O and access handler)
14044 * as well as maintaining statistics.
14045 *
14046 * @returns Strict VBox status code to pass up.
14047 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14048 * @param rcStrict The status from executing an instruction.
14049 */
14050DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14051{
14052 if (rcStrict != VINF_SUCCESS)
14053 {
14054 if (RT_SUCCESS(rcStrict))
14055 {
14056 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
14057 || rcStrict == VINF_IOM_R3_IOPORT_READ
14058 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
14059 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
14060 || rcStrict == VINF_IOM_R3_MMIO_READ
14061 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
14062 || rcStrict == VINF_IOM_R3_MMIO_WRITE
14063 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
14064 || rcStrict == VINF_CPUM_R3_MSR_READ
14065 || rcStrict == VINF_CPUM_R3_MSR_WRITE
14066 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
14067 || rcStrict == VINF_EM_RAW_TO_R3
14068 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
14069 /* raw-mode / virt handlers only: */
14070 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
14071 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
14072 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
14073 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
14074 || rcStrict == VINF_SELM_SYNC_GDT
14075 || rcStrict == VINF_CSAM_PENDING_ACTION
14076 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
14077 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
14078/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
14079 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
14080 if (rcPassUp == VINF_SUCCESS)
14081 pVCpu->iem.s.cRetInfStatuses++;
14082 else if ( rcPassUp < VINF_EM_FIRST
14083 || rcPassUp > VINF_EM_LAST
14084 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
14085 {
14086 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14087 pVCpu->iem.s.cRetPassUpStatus++;
14088 rcStrict = rcPassUp;
14089 }
14090 else
14091 {
14092 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14093 pVCpu->iem.s.cRetInfStatuses++;
14094 }
14095 }
14096 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
14097 pVCpu->iem.s.cRetAspectNotImplemented++;
14098 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14099 pVCpu->iem.s.cRetInstrNotImplemented++;
14100#ifdef IEM_VERIFICATION_MODE_FULL
14101 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
14102 rcStrict = VINF_SUCCESS;
14103#endif
14104 else
14105 pVCpu->iem.s.cRetErrStatuses++;
14106 }
14107 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
14108 {
14109 pVCpu->iem.s.cRetPassUpStatus++;
14110 rcStrict = pVCpu->iem.s.rcPassUp;
14111 }
14112
14113 return rcStrict;
14114}
14115
14116
14117/**
14118 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
14119 * IEMExecOneWithPrefetchedByPC.
14120 *
14121 * Similar code is found in IEMExecLots.
14122 *
14123 * @return Strict VBox status code.
14124 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14126 * @param fExecuteInhibit If set, execute the instruction following CLI,
14127 * POP SS and MOV SS,GR.
14128 */
14129DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
14130{
14131#ifdef IEM_WITH_SETJMP
14132 VBOXSTRICTRC rcStrict;
14133 jmp_buf JmpBuf;
14134 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14135 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14136 if ((rcStrict = setjmp(JmpBuf)) == 0)
14137 {
14138 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14139 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14140 }
14141 else
14142 pVCpu->iem.s.cLongJumps++;
14143 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14144#else
14145 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14146 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14147#endif
14148 if (rcStrict == VINF_SUCCESS)
14149 pVCpu->iem.s.cInstructions++;
14150 if (pVCpu->iem.s.cActiveMappings > 0)
14151 {
14152 Assert(rcStrict != VINF_SUCCESS);
14153 iemMemRollback(pVCpu);
14154 }
14155//#ifdef DEBUG
14156// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14157//#endif
14158
14159 /* Execute the next instruction as well if a cli, pop ss or
14160 mov ss, Gr has just completed successfully. */
14161 if ( fExecuteInhibit
14162 && rcStrict == VINF_SUCCESS
14163 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14164 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
14165 {
14166 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14167 if (rcStrict == VINF_SUCCESS)
14168 {
14169#ifdef LOG_ENABLED
14170 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
14171#endif
14172#ifdef IEM_WITH_SETJMP
14173 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14174 if ((rcStrict = setjmp(JmpBuf)) == 0)
14175 {
14176 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14177 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14178 }
14179 else
14180 pVCpu->iem.s.cLongJumps++;
14181 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14182#else
14183 IEM_OPCODE_GET_NEXT_U8(&b);
14184 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14185#endif
14186 if (rcStrict == VINF_SUCCESS)
14187 pVCpu->iem.s.cInstructions++;
14188 if (pVCpu->iem.s.cActiveMappings > 0)
14189 {
14190 Assert(rcStrict != VINF_SUCCESS);
14191 iemMemRollback(pVCpu);
14192 }
14193 }
14194 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14195 }
14196
14197 /*
14198 * Return value fiddling, statistics and sanity assertions.
14199 */
14200 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14201
14202 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14203 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14204#if defined(IEM_VERIFICATION_MODE_FULL)
14205 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14206 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14207 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14208 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14209#endif
14210 return rcStrict;
14211}
14212
14213
14214#ifdef IN_RC
14215/**
14216 * Re-enters raw-mode or ensure we return to ring-3.
14217 *
14218 * @returns rcStrict, maybe modified.
14219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14220 * @param pCtx The current CPU context.
14221 * @param rcStrict The status code returned by the interpreter.
14222 */
14223DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
14224{
14225 if ( !pVCpu->iem.s.fInPatchCode
14226 && ( rcStrict == VINF_SUCCESS
14227 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14228 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14229 {
14230 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
14231 CPUMRawEnter(pVCpu);
14232 else
14233 {
14234 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
14235 rcStrict = VINF_EM_RESCHEDULE;
14236 }
14237 }
14238 return rcStrict;
14239}
14240#endif
14241
14242
14243/**
14244 * Execute one instruction.
14245 *
14246 * @return Strict VBox status code.
14247 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14248 */
14249VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14250{
14251#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14252 if (++pVCpu->iem.s.cVerifyDepth == 1)
14253 iemExecVerificationModeSetup(pVCpu);
14254#endif
14255#ifdef LOG_ENABLED
14256 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14257 iemLogCurInstr(pVCpu, pCtx, true);
14258#endif
14259
14260 /*
14261 * Do the decoding and emulation.
14262 */
14263 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14264 if (rcStrict == VINF_SUCCESS)
14265 rcStrict = iemExecOneInner(pVCpu, true);
14266
14267#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14268 /*
14269 * Assert some sanity.
14270 */
14271 if (pVCpu->iem.s.cVerifyDepth == 1)
14272 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
14273 pVCpu->iem.s.cVerifyDepth--;
14274#endif
14275#ifdef IN_RC
14276 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14277#endif
14278 if (rcStrict != VINF_SUCCESS)
14279 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14280 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14281 return rcStrict;
14282}
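
/* A minimal illustrative sketch (not built, hence the #if 0): how a caller on the owning EMT might
   drive IEMExecOne in a loop. The function name and the loop bound are placeholders, not the real
   EM logic; only the IEMExecOne call itself reflects the API above. */
#if 0
static VBOXSTRICTRC exampleInterpretAFew(PVMCPU pVCpu)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    for (unsigned i = 0; i < 16 && rcStrict == VINF_SUCCESS; i++)
        rcStrict = IEMExecOne(pVCpu);   /* decodes and executes a single guest instruction */
    return rcStrict;                    /* informational statuses are handed back to the caller */
}
#endif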
14283
14284
14285VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14286{
14287 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14288 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14289
14290 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14291 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14292 if (rcStrict == VINF_SUCCESS)
14293 {
14294 rcStrict = iemExecOneInner(pVCpu, true);
14295 if (pcbWritten)
14296 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14297 }
14298
14299#ifdef IN_RC
14300 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14301#endif
14302 return rcStrict;
14303}
14304
14305
14306VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14307 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14308{
14309 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14310 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14311
14312 VBOXSTRICTRC rcStrict;
14313 if ( cbOpcodeBytes
14314 && pCtx->rip == OpcodeBytesPC)
14315 {
14316 iemInitDecoder(pVCpu, false);
14317#ifdef IEM_WITH_CODE_TLB
14318 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14319 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14320 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14321 pVCpu->iem.s.offCurInstrStart = 0;
14322 pVCpu->iem.s.offInstrNextByte = 0;
14323#else
14324 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14325 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14326#endif
14327 rcStrict = VINF_SUCCESS;
14328 }
14329 else
14330 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14331 if (rcStrict == VINF_SUCCESS)
14332 {
14333 rcStrict = iemExecOneInner(pVCpu, true);
14334 }
14335
14336#ifdef IN_RC
14337 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14338#endif
14339 return rcStrict;
14340}
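
/* Illustrative sketch (not built): a caller that already has the opcode bytes for the current RIP
   (pabInstr/cbInstr are assumed inputs) can hand them to IEM so the decoder skips the guest-memory
   prefetch. CPUMQueryGuestCtxPtr and CPUMCTX2CORE are used the same way as elsewhere in this file. */
#if 0
static VBOXSTRICTRC exampleExecPrefetched(PVMCPU pVCpu, uint8_t const *pabInstr, size_t cbInstr)
{
    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    return IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pCtx->rip, pabInstr, cbInstr);
}
#endif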
14341
14342
14343VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14344{
14345 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14346 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14347
14348 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14349 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14350 if (rcStrict == VINF_SUCCESS)
14351 {
14352 rcStrict = iemExecOneInner(pVCpu, false);
14353 if (pcbWritten)
14354 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14355 }
14356
14357#ifdef IN_RC
14358 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14359#endif
14360 return rcStrict;
14361}
14362
14363
14364VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14365 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14366{
14367 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14368 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14369
14370 VBOXSTRICTRC rcStrict;
14371 if ( cbOpcodeBytes
14372 && pCtx->rip == OpcodeBytesPC)
14373 {
14374 iemInitDecoder(pVCpu, true);
14375#ifdef IEM_WITH_CODE_TLB
14376 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14377 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14378 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14379 pVCpu->iem.s.offCurInstrStart = 0;
14380 pVCpu->iem.s.offInstrNextByte = 0;
14381#else
14382 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14383 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14384#endif
14385 rcStrict = VINF_SUCCESS;
14386 }
14387 else
14388 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14389 if (rcStrict == VINF_SUCCESS)
14390 rcStrict = iemExecOneInner(pVCpu, false);
14391
14392#ifdef IN_RC
14393 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14394#endif
14395 return rcStrict;
14396}
14397
14398
14399/**
14400 * For debugging DISGetParamSize, may come in handy.
14401 *
14402 * @returns Strict VBox status code.
14403 * @param pVCpu The cross context virtual CPU structure of the
14404 * calling EMT.
14405 * @param pCtxCore The context core structure.
14406 * @param OpcodeBytesPC The PC of the opcode bytes.
14407 * @param pvOpcodeBytes Prefetched opcode bytes.
14408 * @param cbOpcodeBytes Number of prefetched bytes.
14409 * @param pcbWritten Where to return the number of bytes written.
14410 * Optional.
14411 */
14412VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14413 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14414 uint32_t *pcbWritten)
14415{
14416 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14417 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14418
14419 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14420 VBOXSTRICTRC rcStrict;
14421 if ( cbOpcodeBytes
14422 && pCtx->rip == OpcodeBytesPC)
14423 {
14424 iemInitDecoder(pVCpu, true);
14425#ifdef IEM_WITH_CODE_TLB
14426 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14427 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14428 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14429 pVCpu->iem.s.offCurInstrStart = 0;
14430 pVCpu->iem.s.offInstrNextByte = 0;
14431#else
14432 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14433 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14434#endif
14435 rcStrict = VINF_SUCCESS;
14436 }
14437 else
14438 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14439 if (rcStrict == VINF_SUCCESS)
14440 {
14441 rcStrict = iemExecOneInner(pVCpu, false);
14442 if (pcbWritten)
14443 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14444 }
14445
14446#ifdef IN_RC
14447 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14448#endif
14449 return rcStrict;
14450}
14451
14452
14453VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14454{
14455 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14456
14457#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14458 /*
14459 * See if there is an interrupt pending in TRPM, inject it if we can.
14460 */
14461 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14462# ifdef IEM_VERIFICATION_MODE_FULL
14463 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14464# endif
14465 if ( pCtx->eflags.Bits.u1IF
14466 && TRPMHasTrap(pVCpu)
14467 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14468 {
14469 uint8_t u8TrapNo;
14470 TRPMEVENT enmType;
14471 RTGCUINT uErrCode;
14472 RTGCPTR uCr2;
14473 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14474 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14475 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14476 TRPMResetTrap(pVCpu);
14477 }
14478
14479 /*
14480 * Log the state.
14481 */
14482# ifdef LOG_ENABLED
14483 iemLogCurInstr(pVCpu, pCtx, true);
14484# endif
14485
14486 /*
14487 * Do the decoding and emulation.
14488 */
14489 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14490 if (rcStrict == VINF_SUCCESS)
14491 rcStrict = iemExecOneInner(pVCpu, true);
14492
14493 /*
14494 * Assert some sanity.
14495 */
14496 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
14497
14498 /*
14499 * Log and return.
14500 */
14501 if (rcStrict != VINF_SUCCESS)
14502 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14503 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14504 if (pcInstructions)
14505 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14506 return rcStrict;
14507
14508#else /* Not verification mode */
14509
14510 /*
14511 * See if there is an interrupt pending in TRPM, inject it if we can.
14512 */
14513 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14514# ifdef IEM_VERIFICATION_MODE_FULL
14515 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14516# endif
14517 if ( pCtx->eflags.Bits.u1IF
14518 && TRPMHasTrap(pVCpu)
14519 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14520 {
14521 uint8_t u8TrapNo;
14522 TRPMEVENT enmType;
14523 RTGCUINT uErrCode;
14524 RTGCPTR uCr2;
14525 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14526 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14527 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14528 TRPMResetTrap(pVCpu);
14529 }
14530
14531 /*
14532 * Initial decoder init w/ prefetch, then setup setjmp.
14533 */
14534 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14535 if (rcStrict == VINF_SUCCESS)
14536 {
14537# ifdef IEM_WITH_SETJMP
14538 jmp_buf JmpBuf;
14539 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14540 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14541 pVCpu->iem.s.cActiveMappings = 0;
14542 if ((rcStrict = setjmp(JmpBuf)) == 0)
14543# endif
14544 {
14545 /*
14546 * The run loop. We limit ourselves to 4096 instructions right now.
14547 */
14548 PVM pVM = pVCpu->CTX_SUFF(pVM);
14549 uint32_t cInstr = 4096;
14550 for (;;)
14551 {
14552 /*
14553 * Log the state.
14554 */
14555# ifdef LOG_ENABLED
14556 iemLogCurInstr(pVCpu, pCtx, true);
14557# endif
14558
14559 /*
14560 * Do the decoding and emulation.
14561 */
14562 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14563 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14564 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14565 {
14566 Assert(pVCpu->iem.s.cActiveMappings == 0);
14567 pVCpu->iem.s.cInstructions++;
14568 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14569 {
14570 uint32_t fCpu = pVCpu->fLocalForcedActions
14571 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14572 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14573 | VMCPU_FF_TLB_FLUSH
14574# ifdef VBOX_WITH_RAW_MODE
14575 | VMCPU_FF_TRPM_SYNC_IDT
14576 | VMCPU_FF_SELM_SYNC_TSS
14577 | VMCPU_FF_SELM_SYNC_GDT
14578 | VMCPU_FF_SELM_SYNC_LDT
14579# endif
14580 | VMCPU_FF_INHIBIT_INTERRUPTS
14581 | VMCPU_FF_BLOCK_NMIS
14582 | VMCPU_FF_UNHALT ));
14583
14584 if (RT_LIKELY( ( !fCpu
14585 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14586 && !pCtx->rflags.Bits.u1IF) )
14587 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
14588 {
14589 if (cInstr-- > 0)
14590 {
14591 Assert(pVCpu->iem.s.cActiveMappings == 0);
14592 iemReInitDecoder(pVCpu);
14593 continue;
14594 }
14595 }
14596 }
14597 Assert(pVCpu->iem.s.cActiveMappings == 0);
14598 }
14599 else if (pVCpu->iem.s.cActiveMappings > 0)
14600 iemMemRollback(pVCpu);
14601 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14602 break;
14603 }
14604 }
14605# ifdef IEM_WITH_SETJMP
14606 else
14607 {
14608 if (pVCpu->iem.s.cActiveMappings > 0)
14609 iemMemRollback(pVCpu);
14610 pVCpu->iem.s.cLongJumps++;
14611 }
14612 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14613# endif
14614
14615 /*
14616 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14617 */
14618 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14619 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14620# if defined(IEM_VERIFICATION_MODE_FULL)
14621 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14622 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14623 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14624 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14625# endif
14626 }
14627
14628 /*
14629 * Maybe re-enter raw-mode and log.
14630 */
14631# ifdef IN_RC
14632 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14633# endif
14634 if (rcStrict != VINF_SUCCESS)
14635 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14636 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14637 if (pcInstructions)
14638 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14639 return rcStrict;
14640#endif /* Not verification mode */
14641}
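
/* Illustrative sketch (not built): IEMExecLots with the optional instruction counter. The wrapper
   name and the log line are placeholders; only the IEMExecLots call reflects the API above. */
#if 0
static VBOXSTRICTRC exampleRunInIem(PVMCPU pVCpu)
{
    uint32_t     cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
    LogFlow(("exampleRunInIem: %u instructions, rc=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif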
14642
14643
14644
14645/**
14646 * Injects a trap, fault, abort, software interrupt or external interrupt.
14647 *
14648 * The parameter list matches TRPMQueryTrapAll pretty closely.
14649 *
14650 * @returns Strict VBox status code.
14651 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14652 * @param u8TrapNo The trap number.
14653 * @param enmType What type is it (trap/fault/abort), software
14654 * interrupt or hardware interrupt.
14655 * @param uErrCode The error code if applicable.
14656 * @param uCr2 The CR2 value if applicable.
14657 * @param cbInstr The instruction length (only relevant for
14658 * software interrupts).
14659 */
14660VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14661 uint8_t cbInstr)
14662{
14663 iemInitDecoder(pVCpu, false);
14664#ifdef DBGFTRACE_ENABLED
14665 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14666 u8TrapNo, enmType, uErrCode, uCr2);
14667#endif
14668
14669 uint32_t fFlags;
14670 switch (enmType)
14671 {
14672 case TRPM_HARDWARE_INT:
14673 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14674 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14675 uErrCode = uCr2 = 0;
14676 break;
14677
14678 case TRPM_SOFTWARE_INT:
14679 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14680 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14681 uErrCode = uCr2 = 0;
14682 break;
14683
14684 case TRPM_TRAP:
14685 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14686 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14687 if (u8TrapNo == X86_XCPT_PF)
14688 fFlags |= IEM_XCPT_FLAGS_CR2;
14689 switch (u8TrapNo)
14690 {
14691 case X86_XCPT_DF:
14692 case X86_XCPT_TS:
14693 case X86_XCPT_NP:
14694 case X86_XCPT_SS:
14695 case X86_XCPT_PF:
14696 case X86_XCPT_AC:
14697 fFlags |= IEM_XCPT_FLAGS_ERR;
14698 break;
14699
14700 case X86_XCPT_NMI:
14701 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14702 break;
14703 }
14704 break;
14705
14706 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14707 }
14708
14709 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14710}
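
/* Illustrative sketch (not built): injecting a #GP(0) directly rather than via TRPM (the TRPM path
   is what IEMInjectTrpmEvent below does). cbInstr is irrelevant for hardware exceptions. */
#if 0
static VBOXSTRICTRC exampleInjectGp0(PVMCPU pVCpu)
{
    return IEMInjectTrap(pVCpu, X86_XCPT_GP, TRPM_TRAP, 0 /* uErrCode */, 0 /* uCr2 */, 0 /* cbInstr */);
}
#endif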
14711
14712
14713/**
14714 * Injects the active TRPM event.
14715 *
14716 * @returns Strict VBox status code.
14717 * @param pVCpu The cross context virtual CPU structure.
14718 */
14719VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14720{
14721#ifndef IEM_IMPLEMENTS_TASKSWITCH
14722 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14723#else
14724 uint8_t u8TrapNo;
14725 TRPMEVENT enmType;
14726 RTGCUINT uErrCode;
14727 RTGCUINTPTR uCr2;
14728 uint8_t cbInstr;
14729 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14730 if (RT_FAILURE(rc))
14731 return rc;
14732
14733 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14734
14735 /** @todo Are there any other codes that imply the event was successfully
14736 * delivered to the guest? See @bugref{6607}. */
14737 if ( rcStrict == VINF_SUCCESS
14738 || rcStrict == VINF_IEM_RAISED_XCPT)
14739 {
14740 TRPMResetTrap(pVCpu);
14741 }
14742 return rcStrict;
14743#endif
14744}
14745
14746
14747VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14748{
14749 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14750 return VERR_NOT_IMPLEMENTED;
14751}
14752
14753
14754VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14755{
14756 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14757 return VERR_NOT_IMPLEMENTED;
14758}
14759
14760
14761#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14762/**
14763 * Executes an IRET instruction with default operand size.
14764 *
14765 * This is for PATM.
14766 *
14767 * @returns VBox status code.
14768 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14769 * @param pCtxCore The register frame.
14770 */
14771VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14772{
14773 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14774
14775 iemCtxCoreToCtx(pCtx, pCtxCore);
14776 iemInitDecoder(pVCpu);
14777 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14778 if (rcStrict == VINF_SUCCESS)
14779 iemCtxToCtxCore(pCtxCore, pCtx);
14780 else
14781 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14782 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14783 return rcStrict;
14784}
14785#endif
14786
14787
14788/**
14789 * Macro used by the IEMExec* method to check the given instruction length.
14790 *
14791 * Will return on failure!
14792 *
14793 * @param a_cbInstr The given instruction length.
14794 * @param a_cbMin The minimum length.
14795 */
14796#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14797 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14798 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14799
14800
14801/**
14802 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14803 *
14804 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14805 *
14806 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14808 * @param rcStrict The status code to fiddle.
14809 */
14810DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14811{
14812 iemUninitExec(pVCpu);
14813#ifdef IN_RC
14814 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
14815 iemExecStatusCodeFiddling(pVCpu, rcStrict));
14816#else
14817 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14818#endif
14819}
14820
14821
14822/**
14823 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14824 *
14825 * This API ASSUMES that the caller has already verified that the guest code is
14826 * allowed to access the I/O port. (The I/O port is in the DX register in the
14827 * guest state.)
14828 *
14829 * @returns Strict VBox status code.
14830 * @param pVCpu The cross context virtual CPU structure.
14831 * @param cbValue The size of the I/O port access (1, 2, or 4).
14832 * @param enmAddrMode The addressing mode.
14833 * @param fRepPrefix Indicates whether a repeat prefix is used
14834 * (doesn't matter which for this instruction).
14835 * @param cbInstr The instruction length in bytes.
14836 * @param iEffSeg The effective segment register number (X86_SREG_XXX).
14837 * @param fIoChecked Whether the access to the I/O port has been
14838 * checked or not. It's typically checked in the
14839 * HM scenario.
14840 */
14841VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14842 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14843{
14844 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14845 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14846
14847 /*
14848 * State init.
14849 */
14850 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14851
14852 /*
14853 * Switch orgy for getting to the right handler.
14854 */
14855 VBOXSTRICTRC rcStrict;
14856 if (fRepPrefix)
14857 {
14858 switch (enmAddrMode)
14859 {
14860 case IEMMODE_16BIT:
14861 switch (cbValue)
14862 {
14863 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14864 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14865 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14866 default:
14867 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14868 }
14869 break;
14870
14871 case IEMMODE_32BIT:
14872 switch (cbValue)
14873 {
14874 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14875 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14876 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14877 default:
14878 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14879 }
14880 break;
14881
14882 case IEMMODE_64BIT:
14883 switch (cbValue)
14884 {
14885 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14886 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14887 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14888 default:
14889 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14890 }
14891 break;
14892
14893 default:
14894 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14895 }
14896 }
14897 else
14898 {
14899 switch (enmAddrMode)
14900 {
14901 case IEMMODE_16BIT:
14902 switch (cbValue)
14903 {
14904 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14905 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14906 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14907 default:
14908 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14909 }
14910 break;
14911
14912 case IEMMODE_32BIT:
14913 switch (cbValue)
14914 {
14915 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14916 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14917 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14918 default:
14919 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14920 }
14921 break;
14922
14923 case IEMMODE_64BIT:
14924 switch (cbValue)
14925 {
14926 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14927 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14928 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14929 default:
14930 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14931 }
14932 break;
14933
14934 default:
14935 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14936 }
14937 }
14938
14939 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14940}
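
/* Illustrative sketch (not built): forwarding a 'rep outsb' exit to IEM. The address mode, segment
   and cbInstr would really be pulled from the VMCS/VMCB exit information; the values used here are
   assumptions for the sake of the example. */
#if 0
static VBOXSTRICTRC exampleForwardRepOutsb(PVMCPU pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu,
                                1 /* cbValue: byte sized OUTSB */,
                                IEMMODE_32BIT /* enmAddrMode (assumed) */,
                                true /* fRepPrefix */,
                                cbInstr,
                                X86_SREG_DS /* iEffSeg: OUTS default segment */,
                                true /* fIoChecked: HM already checked the I/O permission bitmap */);
}
#endif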
14941
14942
14943/**
14944 * Interface for HM and EM for executing string I/O IN (read) instructions.
14945 *
14946 * This API ASSUMES that the caller has already verified that the guest code is
14947 * allowed to access the I/O port. (The I/O port is in the DX register in the
14948 * guest state.)
14949 *
14950 * @returns Strict VBox status code.
14951 * @param pVCpu The cross context virtual CPU structure.
14952 * @param cbValue The size of the I/O port access (1, 2, or 4).
14953 * @param enmAddrMode The addressing mode.
14954 * @param fRepPrefix Indicates whether a repeat prefix is used
14955 * (doesn't matter which for this instruction).
14956 * @param cbInstr The instruction length in bytes.
14957 * @param fIoChecked Whether the access to the I/O port has been
14958 * checked or not. It's typically checked in the
14959 * HM scenario.
14960 */
14961VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14962 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14963{
14964 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14965
14966 /*
14967 * State init.
14968 */
14969 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14970
14971 /*
14972 * Switch orgy for getting to the right handler.
14973 */
14974 VBOXSTRICTRC rcStrict;
14975 if (fRepPrefix)
14976 {
14977 switch (enmAddrMode)
14978 {
14979 case IEMMODE_16BIT:
14980 switch (cbValue)
14981 {
14982 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14983 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14984 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14985 default:
14986 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14987 }
14988 break;
14989
14990 case IEMMODE_32BIT:
14991 switch (cbValue)
14992 {
14993 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14994 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14995 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14996 default:
14997 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14998 }
14999 break;
15000
15001 case IEMMODE_64BIT:
15002 switch (cbValue)
15003 {
15004 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15005 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15006 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15007 default:
15008 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15009 }
15010 break;
15011
15012 default:
15013 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15014 }
15015 }
15016 else
15017 {
15018 switch (enmAddrMode)
15019 {
15020 case IEMMODE_16BIT:
15021 switch (cbValue)
15022 {
15023 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15024 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15025 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15026 default:
15027 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15028 }
15029 break;
15030
15031 case IEMMODE_32BIT:
15032 switch (cbValue)
15033 {
15034 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15035 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15036 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15037 default:
15038 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15039 }
15040 break;
15041
15042 case IEMMODE_64BIT:
15043 switch (cbValue)
15044 {
15045 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15046 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15047 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15048 default:
15049 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15050 }
15051 break;
15052
15053 default:
15054 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15055 }
15056 }
15057
15058 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15059}
15060
15061
15062/**
15063 * Interface for rawmode to execute an OUT instruction.
15064 *
15065 * @returns Strict VBox status code.
15066 * @param pVCpu The cross context virtual CPU structure.
15067 * @param cbInstr The instruction length in bytes.
15068 * @param u16Port The port to write to.
15069 * @param cbReg The register size.
15070 *
15071 * @remarks In ring-0 not all of the state needs to be synced in.
15072 */
15073VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15074{
15075 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15076 Assert(cbReg <= 4 && cbReg != 3);
15077
15078 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15079 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
15080 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15081}
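
/* Illustrative sketch (not built): forwarding a decoded 'out dx, al'. The instruction length and
   port are assumed to come from the exit information; the value written is taken from guest AL by
   the implementation, so only the port and access size are passed. */
#if 0
static VBOXSTRICTRC exampleForwardOutAlDx(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port)
{
    return IEMExecDecodedOut(pVCpu, cbInstr, u16Port, 1 /* cbReg: AL */);
}
#endif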
15082
15083
15084/**
15085 * Interface for rawmode to execute an IN instruction.
15086 *
15087 * @returns Strict VBox status code.
15088 * @param pVCpu The cross context virtual CPU structure.
15089 * @param cbInstr The instruction length in bytes.
15090 * @param u16Port The port to read.
15091 * @param cbReg The register size.
15092 */
15093VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15094{
15095 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15096 Assert(cbReg <= 4 && cbReg != 3);
15097
15098 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15099 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
15100 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15101}
15102
15103
15104/**
15105 * Interface for HM and EM to write to a CRx register.
15106 *
15107 * @returns Strict VBox status code.
15108 * @param pVCpu The cross context virtual CPU structure.
15109 * @param cbInstr The instruction length in bytes.
15110 * @param iCrReg The control register number (destination).
15111 * @param iGReg The general purpose register number (source).
15112 *
15113 * @remarks In ring-0 not all of the state needs to be synced in.
15114 */
15115VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15116{
15117 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15118 Assert(iCrReg < 16);
15119 Assert(iGReg < 16);
15120
15121 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15122 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15123 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15124}
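
/* Illustrative sketch (not built): forwarding a CR3-write exit. The instruction length and source
   general register index are assumed to be decoded from the exit qualification by the caller. */
#if 0
static VBOXSTRICTRC exampleForwardMovCr3Write(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, 3 /* iCrReg: CR3 */, iGReg);
}
#endif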
15125
15126
15127/**
15128 * Interface for HM and EM to read from a CRx register.
15129 *
15130 * @returns Strict VBox status code.
15131 * @param pVCpu The cross context virtual CPU structure.
15132 * @param cbInstr The instruction length in bytes.
15133 * @param iGReg The general purpose register number (destination).
15134 * @param iCrReg The control register number (source).
15135 *
15136 * @remarks In ring-0 not all of the state needs to be synced in.
15137 */
15138VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15139{
15140 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15141 Assert(iCrReg < 16);
15142 Assert(iGReg < 16);
15143
15144 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15145 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15146 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15147}
15148
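/*
 * Illustrative usage sketch, not part of the original sources: dispatching a
 * decoded MOV CRx intercept to the two helpers above.  The wrapper and its
 * fCrWrite parameter are hypothetical; note the swapped argument order of the
 * read and write variants.
 */
#if 0 /* example only, compile-disabled */
static VBOXSTRICTRC exampleHandleMovCRxExit(PVMCPU pVCpu, uint8_t cbInstr,
                                            uint8_t iCrReg, uint8_t iGReg, bool fCrWrite)
{
    return fCrWrite
         ? IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg)  /* mov CRx, GReg */
         : IEMExecDecodedMovCRxRead( pVCpu, cbInstr, iGReg, iCrReg); /* mov GReg, CRx */
}
#endif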
15149
15150/**
15151 * Interface for HM and EM to clear the CR0[TS] bit.
15152 *
15153 * @returns Strict VBox status code.
15154 * @param pVCpu The cross context virtual CPU structure.
15155 * @param cbInstr The instruction length in bytes.
15156 *
15157 * @remarks In ring-0 not all of the state needs to be synced in.
15158 */
15159VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15160{
15161 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15162
15163 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15164 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15165 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15166}
15167
15168
15169/**
15170 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15171 *
15172 * @returns Strict VBox status code.
15173 * @param pVCpu The cross context virtual CPU structure.
15174 * @param cbInstr The instruction length in bytes.
15175 * @param uValue The value to load into CR0.
15176 *
15177 * @remarks In ring-0 not all of the state needs to be synced in.
15178 */
15179VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
15180{
15181 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15182
15183 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15184 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
15185 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15186}
15187
15188
15189/**
15190 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15191 *
15192 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15193 *
15194 * @returns Strict VBox status code.
15195 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15196 * @param cbInstr The instruction length in bytes.
15197 * @remarks In ring-0 not all of the state needs to be synced in.
15198 * @thread EMT(pVCpu)
15199 */
15200VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15201{
15202 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15203
15204 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15205 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15206 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15207}
15208
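/*
 * Illustrative usage sketch, not part of the original sources: since
 * IEMExecDecodedXsetbv reads its operands from ecx and edx:eax of the CPU
 * context, the caller must have those registers synced in first.  The
 * exampleSyncGuestGprs() helper is hypothetical and stands in for whatever
 * state-import mechanism the caller uses.
 */
#if 0 /* example only, compile-disabled */
static VBOXSTRICTRC exampleHandleXsetbvExit(PVMCPU pVCpu, uint8_t cbInstr)
{
    exampleSyncGuestGprs(pVCpu); /* hypothetical: make rax/rcx/rdx valid in the context */
    return IEMExecDecodedXsetbv(pVCpu, cbInstr);
}
#endif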
15209
15210/**
15211 * Checks if IEM is in the process of delivering an event (interrupt or
15212 * exception).
15213 *
15214 * @returns true if we're in the process of raising an interrupt or exception,
15215 * false otherwise.
15216 * @param pVCpu The cross context virtual CPU structure.
15217 * @param puVector Where to store the vector associated with the
15218 * currently delivered event, optional.
15219 * @param pfFlags Where to store the event delivery flags (see
15220 * IEM_XCPT_FLAGS_XXX), optional.
15221 * @param puErr Where to store the error code associated with the
15222 * event, optional.
15223 * @param puCr2 Where to store the CR2 associated with the event,
15224 * optional.
15225 */
15226VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15227{
15228 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15229 if (fRaisingXcpt)
15230 {
15231 if (puVector)
15232 *puVector = pVCpu->iem.s.uCurXcpt;
15233 if (pfFlags)
15234 *pfFlags = pVCpu->iem.s.fCurXcpt;
15235 /* The caller should check the flags to determine if the error code & CR2 are valid for the event. */
15236 if (puErr)
15237 *puErr = pVCpu->iem.s.uCurXcptErr;
15238 if (puCr2)
15239 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15240 }
15241 return fRaisingXcpt;
15242}
15243
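/*
 * Illustrative usage sketch, not part of the original sources: querying the
 * event IEM is currently delivering, e.g. for building pending-event info.
 * The flag names are assumed to come from the IEM_XCPT_FLAGS_XXX family the
 * comment above refers to; the logging function itself is hypothetical.
 */
#if 0 /* example only, compile-disabled */
static void exampleLogCurrentXcpt(PVMCPU pVCpu)
{
    uint8_t  uVector;
    uint32_t fFlags, uErr;
    uint64_t uCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
    {
        Log(("Delivering vector %#x fFlags=%#x\n", uVector, fFlags));
        if (fFlags & IEM_XCPT_FLAGS_ERR) /* error code only valid when flagged */
            Log(("  uErr=%#x\n", uErr));
        if (fFlags & IEM_XCPT_FLAGS_CR2) /* CR2 only valid when flagged */
            Log(("  uCr2=%#RX64\n", uCr2));
    }
}
#endif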
15244
15245#ifdef VBOX_WITH_NESTED_HWVIRT
15246/**
15247 * Interface for HM and EM to emulate the CLGI instruction.
15248 *
15249 * @returns Strict VBox status code.
15250 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15251 * @param cbInstr The instruction length in bytes.
15252 * @thread EMT(pVCpu)
15253 */
15254VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15255{
15256 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15257
15258 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15259 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15260 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15261}
15262
15263
15264/**
15265 * Interface for HM and EM to emulate the STGI instruction.
15266 *
15267 * @returns Strict VBox status code.
15268 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15269 * @param cbInstr The instruction length in bytes.
15270 * @thread EMT(pVCpu)
15271 */
15272VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15273{
15274 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15275
15276 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15277 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15278 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15279}
15280
15281
15282/**
15283 * Interface for HM and EM to emulate the VMLOAD instruction.
15284 *
15285 * @returns Strict VBox status code.
15286 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15287 * @param cbInstr The instruction length in bytes.
15288 * @thread EMT(pVCpu)
15289 */
15290VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15291{
15292 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15293
15294 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15295 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15296 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15297}
15298
15299
15300/**
15301 * Interface for HM and EM to emulate the VMSAVE instruction.
15302 *
15303 * @returns Strict VBox status code.
15304 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15305 * @param cbInstr The instruction length in bytes.
15306 * @thread EMT(pVCpu)
15307 */
15308VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15309{
15310 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15311
15312 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15313 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15314 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15315}
15316
15317
15318/**
15319 * Interface for HM and EM to emulate the INVLPGA instruction.
15320 *
15321 * @returns Strict VBox status code.
15322 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15323 * @param cbInstr The instruction length in bytes.
15324 * @thread EMT(pVCpu)
15325 */
15326VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15327{
15328 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15329
15330 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15331 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15332 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15333}
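
/*
 * Illustrative usage sketch, not part of the original sources: an SVM
 * instruction-intercept dispatcher forwarding decoded intercepts to the
 * helpers above.  The SVM_EXIT_XXX names are assumed to follow hm_svm.h;
 * the dispatcher itself is hypothetical.
 */
#if 0 /* example only, compile-disabled */
static VBOXSTRICTRC exampleHandleSvmInstrExit(PVMCPU pVCpu, uint64_t uExitCode, uint8_t cbInstr)
{
    switch (uExitCode)
    {
        case SVM_EXIT_CLGI:    return IEMExecDecodedClgi(pVCpu, cbInstr);
        case SVM_EXIT_STGI:    return IEMExecDecodedStgi(pVCpu, cbInstr);
        case SVM_EXIT_VMLOAD:  return IEMExecDecodedVmload(pVCpu, cbInstr);
        case SVM_EXIT_VMSAVE:  return IEMExecDecodedVmsave(pVCpu, cbInstr);
        case SVM_EXIT_INVLPGA: return IEMExecDecodedInvlpga(pVCpu, cbInstr);
        default:               return VERR_NOT_SUPPORTED;
    }
}
#endif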
15334#endif /* VBOX_WITH_NESTED_HWVIRT */
15335
15336#ifdef IN_RING3
15337
15338/**
15339 * Handles the unlikely and probably fatal merge cases.
15340 *
15341 * @returns Merged status code.
15342 * @param rcStrict Current EM status code.
15343 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15344 * with @a rcStrict.
15345 * @param iMemMap The memory mapping index. For error reporting only.
15346 * @param pVCpu The cross context virtual CPU structure of the calling
15347 * thread, for error reporting only.
15348 */
15349DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
15350 unsigned iMemMap, PVMCPU pVCpu)
15351{
15352 if (RT_FAILURE_NP(rcStrict))
15353 return rcStrict;
15354
15355 if (RT_FAILURE_NP(rcStrictCommit))
15356 return rcStrictCommit;
15357
15358 if (rcStrict == rcStrictCommit)
15359 return rcStrictCommit;
15360
15361 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
15362 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
15363 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
15364 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
15365 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
15366 return VERR_IOM_FF_STATUS_IPE;
15367}
15368
15369
15370/**
15371 * Helper for IOMR3ProcessForceFlag.
15372 *
15373 * @returns Merged status code.
15374 * @param rcStrict Current EM status code.
15375 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15376 * with @a rcStrict.
15377 * @param iMemMap The memory mapping index. For error reporting only.
15378 * @param pVCpu The cross context virtual CPU structure of the calling
15379 * thread, for error reporting only.
15380 */
15381DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
15382{
15383 /* Simple. */
15384 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
15385 return rcStrictCommit;
15386
15387 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
15388 return rcStrict;
15389
15390 /* EM scheduling status codes. */
15391 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
15392 && rcStrict <= VINF_EM_LAST))
15393 {
15394 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
15395 && rcStrictCommit <= VINF_EM_LAST))
15396 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
15397 }
15398
15399 /* Unlikely */
15400 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
15401}
15402
15403
15404/**
15405 * Called by force-flag handling code when VMCPU_FF_IEM is set.
15406 *
15407 * @returns Merge between @a rcStrict and what the commit operation returned.
15408 * @param pVM The cross context VM structure.
15409 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15410 * @param rcStrict The status code returned by ring-0 or raw-mode.
15411 */
15412VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15413{
15414 /*
15415 * Reset the pending commit.
15416 */
15417 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
15418 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
15419 ("%#x %#x %#x\n",
15420 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15421 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
15422
15423 /*
15424 * Commit the pending bounce buffers (usually just one).
15425 */
15426 unsigned cBufs = 0;
15427 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
15428 while (iMemMap-- > 0)
15429 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
15430 {
15431 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
15432 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
15433 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
15434
15435 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
15436 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
15437 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
15438
15439 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
15440 {
15441 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
15442 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
15443 pbBuf,
15444 cbFirst,
15445 PGMACCESSORIGIN_IEM);
15446 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
15447 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
15448 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
15449 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
15450 }
15451
15452 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
15453 {
15454 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
15455 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
15456 pbBuf + cbFirst,
15457 cbSecond,
15458 PGMACCESSORIGIN_IEM);
15459 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
15460 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
15461 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
15462 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
15463 }
15464 cBufs++;
15465 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
15466 }
15467
15468 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
15469 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
15470 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15471 pVCpu->iem.s.cActiveMappings = 0;
15472 return rcStrict;
15473}
15474
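/*
 * Illustrative usage sketch, not part of the original sources: ring-3
 * force-flag processing only needs to call IEMR3ProcessForceFlag when
 * VMCPU_FF_IEM is set.  The wrapper function is hypothetical.
 */
#if 0 /* example only, compile-disabled */
static VBOXSTRICTRC exampleProcessPendingIemCommits(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif
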
15475#endif /* IN_RING3 */
15476